code stringlengths 31 1.05M | apis list | extract_api stringlengths 97 1.91M |
|---|---|---|
import os
import sys
import time
import glob
import shutil
import argparse
import cv2
import numpy as np
sys.path.insert(0, '..')
import plantid
def imread_ex(filename, flags=-1):
    """Read an image from ``filename``, tolerating non-ASCII paths.

    Uses ``np.fromfile`` + ``cv2.imdecode`` instead of ``cv2.imread`` so
    that paths containing non-ASCII characters (e.g. Chinese taxon names)
    load correctly on Windows.

    Parameters
    ----------
    filename : str
        Path of the image file to read.
    flags : int, optional
        Decode flags forwarded to ``cv2.imdecode``
        (default -1, i.e. ``cv2.IMREAD_UNCHANGED``).

    Returns
    -------
    ndarray or None
        Decoded image, or ``None`` if the file cannot be read or decoded.
    """
    try:
        return cv2.imdecode(np.fromfile(filename, dtype=np.uint8), flags)
    except Exception:
        # Best-effort reader: any failure (missing file, corrupt image)
        # yields None; the unused ``as e`` binding was dropped.
        return None
def split_images_by_identify(src_dir, dst_dir):
    """Classify every file in ``src_dir`` with the plant identifier and
    move confidently identified images into per-taxon subfolders of
    ``dst_dir``.

    Moved files are renamed ``<confidence>_<original name>``; a progress
    line with per-image elapsed time is printed for every file.
    """
    identifier = plantid.PlantIdentifier()
    paths = glob.glob(os.path.join(src_dir, '*'))
    tic = time.time()
    for index, path in enumerate(paths):
        result = identifier.identify(imread_ex(path), topk=1)
        if result['status'] == 0:
            top1 = result['results'][0]
            chinese_name = top1['chinese_name']
            latin_name = top1['latin_name']
            confidence = top1['probability']
            # Folder name is "<chinese> <latin>", or just the Chinese name
            # when no Latin name is available.
            if latin_name == '':
                taxon_name = chinese_name
            else:
                taxon_name = '{} {}'.format(chinese_name, latin_name)
            # Only move images identified with confidence above 0.1.
            if confidence > 0.1:
                subdir = os.path.join(dst_dir, taxon_name)
                os.makedirs(subdir, exist_ok=True)
                new_name = '{:.3f}_{}'.format(confidence, os.path.basename(path))
                shutil.move(path, os.path.join(subdir, new_name))
        print('[{}/{}] Time: {:.3f}s {}'.format(index + 1, len(paths), time.time() - tic, path))
        tic = time.time()
def parse_arguments(argv):
    """Parse command-line options.

    Supports ``--src_dir`` (input folder) and ``--dst_dir`` (output
    folder), both with Windows-style defaults.
    """
    parser = argparse.ArgumentParser()
    for flag, default in (('--src_dir', 'E:/test_images'),
                          ('--dst_dir', 'E:/test_images_results')):
        parser.add_argument(flag, type=str, default=default)
    return parser.parse_args(argv)
if __name__ == '__main__':
    # Script entry point: parse CLI arguments, validate that the source
    # directory exists, then sort every image in it into per-taxon folders.
    args = parse_arguments(sys.argv[1:])
    if not os.path.exists(args.src_dir):
        raise ValueError('src_dir does not exist!')
    split_images_by_identify(args.src_dir, dst_dir=args.dst_dir)
| [
"os.path.exists",
"numpy.fromfile",
"sys.path.insert",
"argparse.ArgumentParser",
"os.makedirs",
"shutil.move",
"os.path.join",
"plantid.PlantIdentifier",
"os.path.basename",
"time.time"
] | [((107, 131), 'sys.path.insert', 'sys.path.insert', (['(0)', '""".."""'], {}), "(0, '..')\n", (122, 131), False, 'import sys\n'), ((403, 428), 'plantid.PlantIdentifier', 'plantid.PlantIdentifier', ([], {}), '()\n', (426, 428), False, 'import plantid\n'), ((505, 516), 'time.time', 'time.time', ([], {}), '()\n', (514, 516), False, 'import time\n'), ((1562, 1587), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1585, 1587), False, 'import argparse\n'), ((460, 486), 'os.path.join', 'os.path.join', (['src_dir', '"""*"""'], {}), "(src_dir, '*')\n", (472, 486), False, 'import os\n'), ((1508, 1519), 'time.time', 'time.time', ([], {}), '()\n', (1517, 1519), False, 'import time\n'), ((1866, 1894), 'os.path.exists', 'os.path.exists', (['args.src_dir'], {}), '(args.src_dir)\n', (1880, 1894), False, 'import os\n'), ((221, 258), 'numpy.fromfile', 'np.fromfile', (['filename'], {'dtype': 'np.uint8'}), '(filename, dtype=np.uint8)\n', (232, 258), True, 'import numpy as np\n'), ((1106, 1139), 'os.path.join', 'os.path.join', (['dst_dir', 'taxon_name'], {}), '(dst_dir, taxon_name)\n', (1118, 1139), False, 'import os\n'), ((1156, 1194), 'os.makedirs', 'os.makedirs', (['dst_subdir'], {'exist_ok': '(True)'}), '(dst_subdir, exist_ok=True)\n', (1167, 1194), False, 'import os\n'), ((1344, 1379), 'shutil.move', 'shutil.move', (['filename', 'dst_filename'], {}), '(filename, dst_filename)\n', (1355, 1379), False, 'import shutil\n'), ((1450, 1461), 'time.time', 'time.time', ([], {}), '()\n', (1459, 1461), False, 'import time\n'), ((1299, 1325), 'os.path.basename', 'os.path.basename', (['filename'], {}), '(filename)\n', (1315, 1325), False, 'import os\n')] |
"""
drift/continuous.py
===================
GSadjust code for calculating continuous-model drift correction.
--------------------------------------------------------------------------------
This software is preliminary, provisional, and is subject to revision. It is
being provided to meet the need for timely best science. The software has not
received final approval by the U.S. Geological Survey (USGS). No warranty,
expressed or implied, is made by the USGS or the U.S. Government as to the
functionality of the software and related material nor shall the fact of release
constitute any such warranty. The software is provided on the condition that
neither the USGS nor the U.S. Government shall be held liable for any damages
resulting from the authorized or unauthorized use of the software.
"""
import logging
import numpy as np
from scipy.interpolate import UnivariateSpline
from ..data import DeltaNormal
# Number of evaluation points for the interpolated drift curve and for each
# extrapolated end segment. Kept small for decent performance (see
# drift_continuous docstring).
N_PTS_IN_INTERPOLATION = 300
N_PTS_IN_EXTRAPOLATION = 200
def drift_continuous(
    data,
    plot_data,
    drift_x,
    drift_rate,
    method_key,
    tension_slider_value,
    extrapolation_type,
    weight_obs,
    min_time,
    max_time,
    loop_name,
):
    """Interpolate drift model: polynomial, spline, etc. at N_PTS_IN_INTERPOLATION
    points, plus N_PTS_IN_EXTRAPOLATION on either side.
    These values need to be relatively small for decent performance.
    Parameters
    ----------
    data : list
        list of ObsTreeStations
    plot_data : list
        One entry per station. Each entry is a list, in the order:
        [[plot time values],
        [plot g values],
        station name,
        [g standard deviation],
        [time standard deviation]]
    drift_x : list
        Drift time observations (x location of points on bottom plot)
    drift_rate : list
        Drift rate observations (y location of points on bottom plot)
    method_key : {0, 1, 2, 3, 4}
        Indicates type of drift correction.
        0: Constant
        1: Spline
        2-4: Polynomial (degree is one less than the value)
    tension_slider_value : int
        Controls tension on the interpolated spline
    extrapolation_type : {1, anything else}
        Controls how interpolation is extended from the outermost data.
        1: Constant
        not 1: linearly extend the fitted curve at the same slope as the first/last
        2 data points
    weight_obs : int
        Controls if observations are weighted when fitting a constant drift rate.
        Only used if drift is set to constant, not for other methods.
        0: no weighting
        not 0: weighted
    min_time : float
        Time to extrapolate at the beginning. Should be the time of the first station
        occupation of the loop.
    max_time : float
        Time to extrapolate at the end. Should be the time of the last station
        occupation of the loop.
    loop_name : str
        loop name, for creating deltas
    Returns
    -------
    delta_list : list
        List of deltas
    xp : ndarray
        For plotting the bottom plot
    yp : ndarray
        For plotting the bottom plot
    z_main : (mean_drift, sigma)
        These are displayed on the plot.
    Notes
    -----
    NOTE(review): if the polynomial fit is singular this function returns
    the class ``np.linalg.LinAlgError`` itself (a single object, not the
    4-tuple documented above); callers must special-case that return value.
    """
    # Evaluation grid spanning the observed drift times.
    xp = np.linspace(min(drift_x), max(drift_x), N_PTS_IN_INTERPOLATION) # constant
    drift_stats = None
    z_main = []
    if method_key == 0: # constant drift correction
        if weight_obs == 0:
            # Unweighted mean drift; sigma is the standard error of the mean.
            mean_drift = sum(drift_rate) / len(drift_rate)
            sigma = np.std(drift_rate) / np.sqrt(len(drift_rate))
            yp = np.zeros(xp.size) + mean_drift
            z_main = [(mean_drift, sigma)]
        # Weight observations according to NGA method
        else:
            drifts, drift_w = [], []
            for station_data in plot_data:
                # Per-station arrays: times, g values, g std devs, time std devs
                # (index 2 is the station name and is skipped).
                t, R, Rsd, tsd = (
                    station_data[0],
                    station_data[1],
                    station_data[3],
                    station_data[4],
                )
                if len(t) > 1:
                    # One drift sample per repeat occupation, relative to the
                    # first observation at the station.
                    for i in range(1, len(t)):
                        dr = R[i] - R[0]
                        dt = (t[i] - t[0]) * 24
                        sdr = np.sqrt(Rsd[i] ** 2 + Rsd[0] ** 2)
                        sdt = np.sqrt(tsd[i] ** 2 + tsd[0] ** 2)
                        drifts.append(dr / dt)
                        # Error propagation for the ratio dr/dt.
                        drift_sd = np.sqrt(
                            sdr ** 2 / dt ** 2 + dr ** 2 * sdt ** 2 / dt ** 4
                        )
                        drift_w.append(1 / drift_sd ** 2)
            # Inverse-variance weighted mean of the drift samples.
            num = []
            for idx, w in enumerate(drift_w):
                num.append(w * drifts[idx])
            mean_drift = np.sum(num) / np.sum(drift_w)
            # Weighted standard deviation of the weighted mean.
            num = []
            for idx, w in enumerate(drift_w):
                num.append(w * (drifts[idx] - mean_drift) ** 2)
            sigma_d = np.sqrt(np.sum(num) / ((len(drift_w) - 1) * np.sum(drift_w)))
            drift_stats = dict()
            # t0: first plotted time of the first station (reference epoch
            # for the uncertainty propagation in calc_cont_dg).
            drift_stats["t0"] = plot_data[0][0][0]
            drift_stats["sigma_d"] = sigma_d
            drift_stats["mean_drift"] = mean_drift
            yp = np.zeros(xp.size) + mean_drift
            z_main = [(mean_drift, sigma_d)]
    else:
        # Shift times so the fit starts at zero (better conditioning), then
        # sort the observations by time.
        x0 = [f - np.min(drift_x) for f in drift_x]
        xp0 = [f - np.min(xp) for f in xp]
        idx = sorted(range(len(x0)), key=lambda xpt: x0[xpt])
        x_sorted, drift_rate_sorted = [], []
        for i in idx:
            x_sorted.append(x0[i])
            drift_rate_sorted.append(drift_rate[i])
        x0 = x_sorted
        drift_rate = drift_rate_sorted
        if method_key == 9:
            # NOTE(review): dead branch -- 9 is not a documented method_key
            # and this block does nothing.
            pass
        if method_key == 1: # spline
            try:
                s = UnivariateSpline(x0, drift_rate, k=3, s=tension_slider_value)
                xs = np.linspace(x0[0], x0[-1], N_PTS_IN_INTERPOLATION)
                yp = s(xs)
                logging.info(
                    "Spline drift correction, tension={}".format(tension_slider_value)
                )
            except Exception:
                # NOTE(review): any spline failure is re-raised as IndexError;
                # presumably the caller catches IndexError specifically.
                raise IndexError
        else:
            # Polynomial interpolation. Degree is one less than the method key, e.g.,
            # method_key == 2 is 1st order polynomial, etc.
            try:
                z_main = np.polyfit(x0, drift_rate, method_key - 1)
                p = np.poly1d(z_main)
                yp = p(xp0)
                logging.info(
                    "Polynomial drift correction degree {}".format(method_key - 1)
                )
            except np.linalg.LinAlgError as e:
                # NOTE(review): returns the exception class itself rather
                # than the documented 4-tuple (see docstring note).
                return np.linalg.LinAlgError
    # Method for extrapolating beyond fitted drift curve extent
    if extrapolation_type == 1: # constant
        # Pad both ends of the curve with the first/last fitted value.
        new_xp = np.linspace(min_time, min(drift_x), N_PTS_IN_EXTRAPOLATION)
        new_xp = np.append(new_xp, xp)
        new_xp = np.append(new_xp, np.linspace(max(drift_x), max_time, 200))
        xp = new_xp
        new_yp = np.ones(200) * yp[0]
        new_yp = np.append(new_yp, yp)
        new_yp = np.append(new_yp, np.ones(200) * yp[-1])
        yp = new_yp
    else: # linear extrapolation from first two (and last two) points
        # get first two points
        x = xp[:2]
        y = yp[:2]
        z = np.polyfit(x, y, 1)
        p = np.poly1d(z)
        new_xp1 = np.linspace(min_time, min(drift_x), 200)
        yp1 = p(new_xp1)
        # ... and extend from the last two points at the same slope.
        x = xp[-2:]
        y = yp[-2:]
        z = np.polyfit(x, y, 1)
        p = np.poly1d(z)
        new_xp2 = np.linspace(max(drift_x), max_time, 200)
        yp2 = p(new_xp2)
        xp_temp = np.append(new_xp1, xp)
        xp = np.append(xp_temp, new_xp2)
        yp_temp = np.append(yp1, yp)
        yp = np.append(yp_temp, yp2)
    delta_list = calc_cont_dg(xp, yp, data, loop_name, drift_stats)
    return delta_list, xp, yp, z_main
def calc_cont_dg(xp, yp, data, loop_name, drift_stats):
    """
    Calculates delta-g's while removing drift using the input drift model
    Parameters
    ----------
    xp : ndarray
        times of continuous drift model
    yp : ndarray
        continuous drift model
    data : list
        list of ObsTreeStations
    loop_name : str
    drift_stats : dict or None
        If dict, observations are weighted; None if not
    Returns
    -------
    list
        list of deltas
    Notes
    -----
    NOTE(review): this function mutates ``data`` in place
    (``data.pop(0)`` removes the first station from the caller's list).
    """
    # Integrate the drift-rate curve over time: ypsum[i] is the cumulative
    # drift up to xp[i].
    first = True
    ypsum = [0]
    delta_list = []
    for x, drift_rate in zip(xp, yp):
        if first:
            first = False
            prev_x = x
        else:
            prev_sum = ypsum[-1]
            # *24: presumably converts day-based times to hours -- verify.
            interval = (x - prev_x) * 24
            prev_x = x
            ypsum.append(prev_sum + drift_rate * interval)
    xp = xp.tolist()
    yp = ypsum # yp = yp.tolist()
    prev_station = data.pop(0)
    for station in data:
        # If using weighted dg
        if drift_stats:
            # Propagate the drift-model uncertainty into the station SD.
            # NOTE(review): ``data[0]`` here is the first *remaining* station
            # (the second original one) for every loop iteration, not the
            # current one -- confirm this is intentional.
            station.assigned_sd = np.sqrt(
                station.original_sd ** 2
                + ((station.tmean - drift_stats["t0"]) * 24) ** 2
                * drift_stats["sigma_d"] ** 2
                + np.sqrt(station.t_stdev ** 2 + data[0].t_stdev ** 2)
                * drift_stats["mean_drift"] ** 2
            )
        else:
            station.assigned_sd = None
        # Look up the cumulative drift at the model time closest to each
        # station's mean observation time; the delta correction is the
        # difference in cumulative drift between the two stations.
        drift1_idx = min(
            range(len(xp)), key=lambda i: abs(xp[i] - prev_station.tmean)
        )
        drift1 = yp[drift1_idx]
        drift2_idx = min(range(len(xp)), key=lambda i: abs(xp[i] - station.tmean))
        drift2 = yp[drift2_idx]
        delta = DeltaNormal(
            prev_station, station, driftcorr=drift2 - drift1, loop=loop_name
        )
        delta_list.append(delta)
        prev_station = station
    return delta_list
| [
"numpy.sqrt",
"numpy.ones",
"numpy.polyfit",
"scipy.interpolate.UnivariateSpline",
"numpy.min",
"numpy.append",
"numpy.sum",
"numpy.zeros",
"numpy.linspace",
"numpy.std",
"numpy.poly1d"
] | [((6783, 6804), 'numpy.append', 'np.append', (['new_xp', 'xp'], {}), '(new_xp, xp)\n', (6792, 6804), True, 'import numpy as np\n'), ((6957, 6978), 'numpy.append', 'np.append', (['new_yp', 'yp'], {}), '(new_yp, yp)\n', (6966, 6978), True, 'import numpy as np\n'), ((7209, 7228), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (7219, 7228), True, 'import numpy as np\n'), ((7241, 7253), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (7250, 7253), True, 'import numpy as np\n'), ((7390, 7409), 'numpy.polyfit', 'np.polyfit', (['x', 'y', '(1)'], {}), '(x, y, 1)\n', (7400, 7409), True, 'import numpy as np\n'), ((7422, 7434), 'numpy.poly1d', 'np.poly1d', (['z'], {}), '(z)\n', (7431, 7434), True, 'import numpy as np\n'), ((7537, 7559), 'numpy.append', 'np.append', (['new_xp1', 'xp'], {}), '(new_xp1, xp)\n', (7546, 7559), True, 'import numpy as np\n'), ((7573, 7600), 'numpy.append', 'np.append', (['xp_temp', 'new_xp2'], {}), '(xp_temp, new_xp2)\n', (7582, 7600), True, 'import numpy as np\n'), ((7619, 7637), 'numpy.append', 'np.append', (['yp1', 'yp'], {}), '(yp1, yp)\n', (7628, 7637), True, 'import numpy as np\n'), ((7651, 7674), 'numpy.append', 'np.append', (['yp_temp', 'yp2'], {}), '(yp_temp, yp2)\n', (7660, 7674), True, 'import numpy as np\n'), ((6919, 6931), 'numpy.ones', 'np.ones', (['(200)'], {}), '(200)\n', (6926, 6931), True, 'import numpy as np\n'), ((3491, 3509), 'numpy.std', 'np.std', (['drift_rate'], {}), '(drift_rate)\n', (3497, 3509), True, 'import numpy as np\n'), ((3554, 3571), 'numpy.zeros', 'np.zeros', (['xp.size'], {}), '(xp.size)\n', (3562, 3571), True, 'import numpy as np\n'), ((4663, 4674), 'numpy.sum', 'np.sum', (['num'], {}), '(num)\n', (4669, 4674), True, 'import numpy as np\n'), ((4677, 4692), 'numpy.sum', 'np.sum', (['drift_w'], {}), '(drift_w)\n', (4683, 4692), True, 'import numpy as np\n'), ((5105, 5122), 'numpy.zeros', 'np.zeros', (['xp.size'], {}), '(xp.size)\n', (5113, 5122), True, 'import numpy as np\n'), ((5209, 
5224), 'numpy.min', 'np.min', (['drift_x'], {}), '(drift_x)\n', (5215, 5224), True, 'import numpy as np\n'), ((5262, 5272), 'numpy.min', 'np.min', (['xp'], {}), '(xp)\n', (5268, 5272), True, 'import numpy as np\n'), ((5683, 5744), 'scipy.interpolate.UnivariateSpline', 'UnivariateSpline', (['x0', 'drift_rate'], {'k': '(3)', 's': 'tension_slider_value'}), '(x0, drift_rate, k=3, s=tension_slider_value)\n', (5699, 5744), False, 'from scipy.interpolate import UnivariateSpline\n'), ((5766, 5816), 'numpy.linspace', 'np.linspace', (['x0[0]', 'x0[-1]', 'N_PTS_IN_INTERPOLATION'], {}), '(x0[0], x0[-1], N_PTS_IN_INTERPOLATION)\n', (5777, 5816), True, 'import numpy as np\n'), ((6248, 6290), 'numpy.polyfit', 'np.polyfit', (['x0', 'drift_rate', '(method_key - 1)'], {}), '(x0, drift_rate, method_key - 1)\n', (6258, 6290), True, 'import numpy as np\n'), ((6311, 6328), 'numpy.poly1d', 'np.poly1d', (['z_main'], {}), '(z_main)\n', (6320, 6328), True, 'import numpy as np\n'), ((7014, 7026), 'numpy.ones', 'np.ones', (['(200)'], {}), '(200)\n', (7021, 7026), True, 'import numpy as np\n'), ((4854, 4865), 'numpy.sum', 'np.sum', (['num'], {}), '(num)\n', (4860, 4865), True, 'import numpy as np\n'), ((4174, 4208), 'numpy.sqrt', 'np.sqrt', (['(Rsd[i] ** 2 + Rsd[0] ** 2)'], {}), '(Rsd[i] ** 2 + Rsd[0] ** 2)\n', (4181, 4208), True, 'import numpy as np\n'), ((4239, 4273), 'numpy.sqrt', 'np.sqrt', (['(tsd[i] ** 2 + tsd[0] ** 2)'], {}), '(tsd[i] ** 2 + tsd[0] ** 2)\n', (4246, 4273), True, 'import numpy as np\n'), ((4356, 4414), 'numpy.sqrt', 'np.sqrt', (['(sdr ** 2 / dt ** 2 + dr ** 2 * sdt ** 2 / dt ** 4)'], {}), '(sdr ** 2 / dt ** 2 + dr ** 2 * sdt ** 2 / dt ** 4)\n', (4363, 4414), True, 'import numpy as np\n'), ((4890, 4905), 'numpy.sum', 'np.sum', (['drift_w'], {}), '(drift_w)\n', (4896, 4905), True, 'import numpy as np\n'), ((8989, 9041), 'numpy.sqrt', 'np.sqrt', (['(station.t_stdev ** 2 + data[0].t_stdev ** 2)'], {}), '(station.t_stdev ** 2 + data[0].t_stdev ** 2)\n', (8996, 9041), True, 
'import numpy as np\n')] |
import argparse
import logging
import pickle
import time
from pathlib import Path
#
# Configure the path
# Repository layout: the project root is two levels above this file.
ROOT_PATH = Path(__file__).resolve().parent.parent
DATA_PATH = ROOT_PATH / 'data'      # datasets
MODELS_PATH = ROOT_PATH / 'models'  # saved learners / encoders / databunches
UTILS_PATH = ROOT_PATH / 'utils'    # helper scripts
import numpy as np
import pandas as pd
import yaml
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import torch
from box import Box
from fastai.metrics import accuracy, error_rate
from fastai.text import (AWD_LSTM, TextClasDataBunch, TextLMDataBunch,
language_model_learner, text_classifier_learner)
# Module-wide logger used to report training progress and metrics.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('party prediction')
def fit_model(df, filename, fit_lm: bool = True):
    """Fine-tune an AWD_LSTM language model on ``df`` and train a text
    classifier on top of its encoder.

    Bug fix: the signature previously read ``fit_lm:True`` -- an annotation
    with the literal ``True`` instead of the type ``bool``, and no default
    value. It is now ``fit_lm: bool = True`` (backward compatible).

    Parameters
    ----------
    df : pandas.DataFrame
        Training data; expected with the label column first and the text
        column second (as prepared by ``obtain_party_classifier``).
    filename : str
        Identifier for saved artefacts.
        NOTE(review): currently unused -- the save paths below contain the
        literal text '(unknown)' with no f-string placeholder; presumably
        they were meant to interpolate ``filename``. Verify before relying
        on the artefact names.
    fit_lm : bool, optional
        When True (default), fine-tune the language model and save its
        encoder before training the classifier; when False, reuse the
        previously saved encoder.

    Returns
    -------
    fastai Learner
        The trained text classifier learner.
    """
    LANGUAGEMODEL_FILE = MODELS_PATH / f'ft_enc_it_(unknown)'
    # Hold out 20% of the data for validation.
    df_trn, df_val = train_test_split(df,
                                      test_size=0.2,
                                      random_state=42)
    data_lm = TextLMDataBunch.from_df(train_df=df_trn,
                                      valid_df=df_val,
                                      path="",
                                      min_freq=1)
    learn = language_model_learner(data_lm,
                                   arch=AWD_LSTM,
                                   pretrained=True,
                                   drop_mult=0.3)
    if fit_lm:
        # Run one epoch with lower layers
        logger.info('Fit frozen')
        learn.fit_one_cycle(5,
                            max_lr=1e-3, moms=(0.8, 0.7))
        learn.unfreeze()
        logger.info('Fit unfrozen')
        learn.fit_one_cycle(5,
                            max_lr=1e-3, moms=(0.8, 0.7))
        logger.info('Saving language model')
        learn.save_encoder(LANGUAGEMODEL_FILE)
    # Classification databunch reuses the language-model vocabulary so the
    # saved encoder is compatible.
    data_clas = TextClasDataBunch.from_df(path="",
                                          train_df=df_trn,
                                          valid_df=df_val,
                                          vocab=data_lm.train_ds.vocab,
                                          bs=64)
    data_clas.save(MODELS_PATH / f'databunch_(unknown)')
    learn_clas = text_classifier_learner(data_clas,
                                         AWD_LSTM,
                                         drop_mult=0.5,
                                         metrics=[accuracy, error_rate])
    logger.info('Loading language model')
    learn_clas.load_encoder(LANGUAGEMODEL_FILE)
    # Discriminative learning rates: each earlier layer group trains at a
    # rate divided by a further factor of lrm.
    lr = 1e-2
    lrm = 2.6
    lrs = np.array([lr/(lrm**4), lr/(lrm**3), lr/(lrm**2), lr/lrm, lr])
    # Gradual unfreezing: train the head, then progressively deeper groups.
    logger.info('Fit one cycle')
    learn_clas.fit_one_cycle(5, lrs)
    logger.info('Fit one cycle')
    learn_clas.freeze_to(-2)
    logger.info('Fit one cycle')
    learn_clas.fit_one_cycle(5, lrs)
    logger.info('Fit one cycle')
    learn_clas.freeze_to(-3)
    learn_clas.fit_one_cycle(2, lrs)
    learn_clas.freeze_to(-4)
    learn_clas.fit_one_cycle(2, lrs)
    return learn_clas
def obtain_party_classifier(df, text_column, label_column, substring_filename):
    """Train a party classifier on ``df`` and export it to disk.

    Keeps only the label and text columns, fits the classifier (including
    language-model fine-tuning), saves the learner, logs the validation
    accuracy and exports the model as a pickle under MODELS_PATH.
    """
    subset = df[[label_column, text_column]]
    classifier = fit_model(subset, substring_filename, fit_lm=True)
    classifier.save(MODELS_PATH / f'classifier_{substring_filename}')
    # Prediction on a sample tweet; the result is discarded -- presumably a
    # smoke test that the learner predicts without error.
    classifier.predict("@berlusconi Come non condividere. <NAME>.")
    preds, targets = classifier.get_preds()
    predictions = np.argmax(preds, axis=1)
    logger.info(accuracy_score(targets, predictions))
    OUTPUT_FILE = MODELS_PATH / f'classifier_{substring_filename}_exported.pkl'
    classifier.export(OUTPUT_FILE)
    logger.info(f'Saving the model in file: {OUTPUT_FILE}')
| [
"logging.basicConfig",
"logging.getLogger",
"fastai.text.TextClasDataBunch.from_df",
"fastai.text.text_classifier_learner",
"sklearn.model_selection.train_test_split",
"pathlib.Path",
"numpy.argmax",
"numpy.array",
"fastai.text.TextLMDataBunch.from_df",
"fastai.text.language_model_learner",
"skl... | [((632, 671), 'logging.basicConfig', 'logging.basicConfig', ([], {'level': 'logging.INFO'}), '(level=logging.INFO)\n', (651, 671), False, 'import logging\n'), ((681, 718), 'logging.getLogger', 'logging.getLogger', (['"""party prediction"""'], {}), "('party prediction')\n", (698, 718), False, 'import logging\n'), ((853, 905), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df'], {'test_size': '(0.2)', 'random_state': '(42)'}), '(df, test_size=0.2, random_state=42)\n', (869, 905), False, 'from sklearn.model_selection import train_test_split\n'), ((996, 1074), 'fastai.text.TextLMDataBunch.from_df', 'TextLMDataBunch.from_df', ([], {'train_df': 'df_trn', 'valid_df': 'df_val', 'path': '""""""', 'min_freq': '(1)'}), "(train_df=df_trn, valid_df=df_val, path='', min_freq=1)\n", (1019, 1074), False, 'from fastai.text import AWD_LSTM, TextClasDataBunch, TextLMDataBunch, language_model_learner, text_classifier_learner\n'), ((1201, 1279), 'fastai.text.language_model_learner', 'language_model_learner', (['data_lm'], {'arch': 'AWD_LSTM', 'pretrained': '(True)', 'drop_mult': '(0.3)'}), '(data_lm, arch=AWD_LSTM, pretrained=True, drop_mult=0.3)\n', (1223, 1279), False, 'from fastai.text import AWD_LSTM, TextClasDataBunch, TextLMDataBunch, language_model_learner, text_classifier_learner\n'), ((1825, 1935), 'fastai.text.TextClasDataBunch.from_df', 'TextClasDataBunch.from_df', ([], {'path': '""""""', 'train_df': 'df_trn', 'valid_df': 'df_val', 'vocab': 'data_lm.train_ds.vocab', 'bs': '(64)'}), "(path='', train_df=df_trn, valid_df=df_val, vocab=\n data_lm.train_ds.vocab, bs=64)\n", (1850, 1935), False, 'from fastai.text import AWD_LSTM, TextClasDataBunch, TextLMDataBunch, language_model_learner, text_classifier_learner\n'), ((2174, 2270), 'fastai.text.text_classifier_learner', 'text_classifier_learner', (['data_clas', 'AWD_LSTM'], {'drop_mult': '(0.5)', 'metrics': '[accuracy, error_rate]'}), '(data_clas, AWD_LSTM, drop_mult=0.5, metrics=[\n accuracy, 
error_rate])\n', (2197, 2270), False, 'from fastai.text import AWD_LSTM, TextClasDataBunch, TextLMDataBunch, language_model_learner, text_classifier_learner\n'), ((2517, 2586), 'numpy.array', 'np.array', (['[lr / lrm ** 4, lr / lrm ** 3, lr / lrm ** 2, lr / lrm, lr]'], {}), '([lr / lrm ** 4, lr / lrm ** 3, lr / lrm ** 2, lr / lrm, lr])\n', (2525, 2586), True, 'import numpy as np\n'), ((3335, 3359), 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), '(preds, axis=1)\n', (3344, 3359), True, 'import numpy as np\n'), ((3376, 3412), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['targets', 'predictions'], {}), '(targets, predictions)\n', (3390, 3412), False, 'from sklearn.metrics import accuracy_score\n'), ((117, 131), 'pathlib.Path', 'Path', (['__file__'], {}), '(__file__)\n', (121, 131), False, 'from pathlib import Path\n')] |
import numpy as np
import scipy
from ... import operators
from ... import utilits as ut
from . _ar_yule_walker import ar_yule_walker
__all__ = ['arma_hannan_rissanen']
#------------------------------------------------------------------
def arma_hannan_rissanen(x, poles_order=0, zeros_order=0, unbias = True):
    '''
    Hannan-Rissanen method for
    autoregressive - moving average
    (ARMA) model approximation.
    Parameters
    ---------------
    * x: 1d ndarray.
    * poles_order: int.
        the autoregressive model (pole model)
        order of the desired model.
    * zeros_order: int.
        the moving average model (zeros model)
        order of the desired model.
    * unbias: bool,
        if True, unbiased autocorrelation
        (sum(x(k)*x(n-k))/(N-n)) will be taken.
    Returns
    --------------
    * a: 1d ndarray,
        autoregressive coefficients of the ARMA model.
    * b: 1d ndarray,
        moving average coefficients of the ARMA model.
    * noise_variace: complex or float,
        variance of model residuals.
        NOTE(review): currently a placeholder -- the function always
        returns err = 1, not the actual residual variance.
    Notes:
    ------------
    * Here a simplified model is implemented:
      the high-order AR model is taken equal to the
      desired one.
    References
    ------------
    [1] Brockwell, <NAME>., and <NAME>. 2016.
        Introduction to Time Series and Forecasting. Springer.
    '''
    x = np.asarray(x)
    N = x.shape[0]  # NOTE(review): unused local
    # Stage 1: initial AR coefficient estimate via Yule-Walker.
    a,_ = ar_yule_walker(x,
                        poles_order,
                        unbias=unbias)
    # Lagged data matrix: column 0 is x, remaining columns are its lags.
    r = operators.lags_matrix(x,
                              mode='full',
                              lags=poles_order+1,)
    r1 = r[zeros_order:,0] #x[poly_order+zreo_order]
    # for i in range(1):
    #------------
    # Stage 2: residuals of the AR fit serve as estimates of the noise
    # sequence driving the MA part.
    resid = r[:,0] - r[:,1:].dot(-a[1:])
    rresid = operators.lags_matrix(resid,
                                  mode='full',
                                  lags=zeros_order+1,)
    # Regressor matrix: lagged data (AR part) next to lagged residuals
    # (MA part), row-aligned by trimming the leading rows.
    rn = np.append(r[zeros_order:,1:],
                   rresid[2*zeros_order:,1:],axis=1)
    # res=np.dot(np.linalg.pinv(-rn),r1)
    # Stage 3: joint least-squares fit of AR and MA coefficients.
    res = scipy.linalg.lstsq(rn,r1)[0]
    a = np.append([1],-res[:poles_order])
    #------------
    b = res[poles_order:]#np.append([0],res[poles_order:])
    err=1  # NOTE(review): placeholder, not the true residual variance.
    return a,b,err
# def arma_hannan_rissanen_unbiased(x, poly_order=0, zero_order=0,
# unbias = True, n_psd = None):
# '''
# #FOR TEST!
# Hannan_Rissanen method for autoregressive - moving average
# (ARMA) model approximation with additinal unbias of coefficients.
# Parameters
# ---------------
# * x: 1d ndarray,
# inputs.
# * poly_order: int.
# the autoregressive model (pole model)
# order of the desired model.
# * zero_order: int.
# the moving average model (zeros model)
# order of the desired model.
# * n_psd: int or None.
# length of desired pseudospctrum,
# if None, n_psd = x.shape[0],
# if n_psd<0, than model coefficients (1,-a)
# and noise_variance (\sigma^2) will be returend.
# * unbias: bool,
# if True, unbiased autocorrleation
# (sum(x(k)*x(n-k))/(N-n)) will be taken.
# Returns
# --------------
# > if n_psd>0:
# * pseudo-spectrum,
# > else:
# * ar_cofs (a), ma_cofs (b) - 2 1d ndarray;
# * noise_variace - variance of model residulas.
# Notes:
# ------------
# * Here are implemented simplified model.
# High order AR model is taken equal to
# deisred one.
# Examples
# ------------
# References
# ------------
# [1] Brockwell, <NAME>., and <NAME>. 2016.
# Introduction to Time Series and Forecasting. Springer.
# See also
# -----------
# '''
# x = np.asarray(x)
# N = x.shape[0]
# a,b,_ = arma_hannan_rissanen(x,
# poly_order=poly_order,
# zero_order=zero_order,
# unbias = unbias,
# n_psd = -1)
# # unbias
# z = np.zeros(x.shape,dtype = x.dtype)
# for n in np.arange(np.max([poly_order, zero_order]), N):
# tmp_ar = np.dot(-a[1:], x[n - poly_order:n][::-1])
# tmp_ma = np.dot(b,x[n - zero_order:n][::-1])
# z[n] = x[n] - tmp_ar - tmp_ma
# mh = scipy.signal.lfilter([1], a, z)
# ah = scipy.signal.lfilter(np.r_[1, -b], [1], z)
# #i'm not sure here
# rm = matrix.lags_matrix(mh,
# mode='full',
# mcolumns=poly_order+1,)[2*poly_order:,:-1]
# ra = matrix.lags_matrix(ah,
# mode='full',
# mcolumns=zero_order+1,)[2*zero_order:,:-1]
# print(ra.shape,rm.shape)
# r1 = z[max(poly_order, zero_order):] #x[poly_order+zreo_order]
# rn = np.append(rm[max(zero_order - poly_order, 0):,:],
# ra[max(poly_order - zero_order, 0):,:],axis=1)
# res=np.dot(np.linalg.pinv(rn),r1)
# err = np.sum(np.square(r1- rn.dot(res)))/res.size
# a = np.append([1],-(-a[1:]+res[:poly_order]))
# b = b+res[poly_order:]
# if(n_psd<1):
# return a,b,err
# else:
# psd = ut.arma2psd(a,b,np.abs(err),n_psd)
# return psd
# def arma_hannan_rissanen(x, poly_order=0, zero_order=0,
# unbias = True, n_psd = None):
# '''
# Hannan_Rissanen method for autoregressive - moving average
# (ARMA) model approximation.
# Parameters
# ---------------
# * x: 1d ndarray,
# inputs.
# * poly_order: int.
# the autoregressive model (pole model)
# order of the desired model.
# * zero_order: int.
# the moving average model (zeros model)
# order of the desired model.
# * n_psd: int or None.
# length of desired pseudospctrum,
# if None, n_psd = x.shape[0],
# if n_psd<0, than model coefficients (1,-a)
# and noise_variance (\sigma^2) will be returend.
# * unbias: bool,
# if True, unbiased autocorrleation
# (sum(x(k)*x(n-k))/(N-n)) will be taken.
# Returns
# --------------
# > if n_psd>0:
# * pseudo-spectrum,
# > else:
# * ar_cofs (a), ma_cofs (b) - 2 1d ndarray;
# * noise_variace - variance of model residulas.
# Notes:
# ------------
# * Here are implemented simplified model.
# High order AR model is taken equal to
# deisred one.
# Examples
# ------------
# References
# ------------
# [1] Brockwell, <NAME>., and <NAME>. 2016.
# Introduction to Time Series and Forecasting. Springer.
# See also
# -----------
# '''
# x = np.asarray(x)
# N = x.shape[0]
# if n_psd == None: n_psd = N
# a,_ = spectrum.yule_walker(x,
# poly_order,
# n_psd=-1,
# unbias=unbias)
# a = -a[1:]
# r = matrix.lags_matrix(x,
# mode='full',
# mcolumns=poly_order+1,)
# resid = r[:,0] - r[:,1:].dot(a)
# rresid = matrix.lags_matrix(resid,
# mode='full',
# mcolumns=zero_order+1,)
# # Alternatively covar mode can be applied
# # r = matrix.lags_matrix(x,
# # mode='covar',
# # mcolumns=poly_order+1,)
# # resid = r[:,0] - r[:,1:].dot(a)
# # rresid = matrix.lags_matrix(resid,
# # mode='covar',
# # mcolumns=zero_order+1,)
# # rn = np.append(r[zero_order:,1:], rresid[:,1:],axis=1)
# r1 = r[zero_order:,0] #x[poly_order+zreo_order]
# rn = np.append(r[zero_order:,1:], rresid[2*zero_order:,1:],axis=1)
# res=np.dot(np.linalg.pinv(-rn),r1)
# a = np.append([1],res[:poly_order])
# b = res[poly_order:]
# err=1
# if(n_psd<1):
# return a,b,err
# else:
# psd = ut.arma2psd(a,b,np.abs(err),n_psd)
# return psd | [
"numpy.append",
"scipy.linalg.lstsq",
"numpy.asarray"
] | [((1493, 1506), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (1503, 1506), True, 'import numpy as np\n'), ((2073, 2141), 'numpy.append', 'np.append', (['r[zeros_order:, 1:]', 'rresid[2 * zeros_order:, 1:]'], {'axis': '(1)'}), '(r[zeros_order:, 1:], rresid[2 * zeros_order:, 1:], axis=1)\n', (2082, 2141), True, 'import numpy as np\n'), ((2246, 2280), 'numpy.append', 'np.append', (['[1]', '(-res[:poles_order])'], {}), '([1], -res[:poles_order])\n', (2255, 2280), True, 'import numpy as np\n'), ((2209, 2235), 'scipy.linalg.lstsq', 'scipy.linalg.lstsq', (['rn', 'r1'], {}), '(rn, r1)\n', (2227, 2235), False, 'import scipy\n')] |
# coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Three synthetic data generations.
"""
# Necessary functions and packages call
import numpy as np
def synthetic_data_loading(data_name='Syn1', data_no=1000, seed=0):
  """Generates synthetic datasets.

  Args:
    data_name: 'Syn1', 'Syn2' or 'Syn3', selecting the decision boundary:
      Syn1: x_9 < 0
      Syn2: x_9 + exp(x_10) < 1
      Syn3: x_9 + x_10^3 < 0
    data_no: number of training and testing samples (each)
    seed: random seed

  Returns:
    x_train: training features, shape (data_no, 11)
    y_train: training labels, shape (data_no,)
    x_test: testing features, shape (data_no, 11)
    y_test: testing labels, shape (data_no,)
    c_test: ground truth weights, shape (data_no, 11)
    test_idx: order of testing set index based on the distance from the
      boundary

  Raises:
    ValueError: if data_name is not one of 'Syn1', 'Syn2', 'Syn3'.
  """
  # Bug fix: an unknown data_name previously fell through every branch and
  # raised a confusing NameError (idx0/idx1 never assigned); fail fast.
  if data_name not in ('Syn1', 'Syn2', 'Syn3'):
    raise ValueError(
        'data_name must be one of Syn1, Syn2, Syn3, got: ' + str(data_name))

  # X generation (X ~ N(0,I))
  np.random.seed(seed)
  data_x = np.random.normal(0, 1, [2 * data_no, 11])

  # Y and ground truth local dynamics (C) initialization
  data_y = np.zeros([2 * data_no,])
  data_c = np.zeros([2 * data_no, 11])

  # Boundary definition: idx0/idx1 split the samples into the two regions.
  if data_name == 'Syn1':
    idx0 = np.where(data_x[:, 9] < 0)[0]
    idx1 = np.where(data_x[:, 9] >= 0)[0]
  elif data_name == 'Syn2':
    idx0 = np.where(data_x[:, 9] + np.exp(data_x[:, 10]) < 1)[0]
    idx1 = np.where(data_x[:, 9] + np.exp(data_x[:, 10]) >= 1)[0]
  elif data_name == 'Syn3':
    idx0 = np.where(data_x[:, 9] + np.power(data_x[:, 10], 3) < 0)[0]
    idx1 = np.where(data_x[:, 9] + np.power(data_x[:, 10], 3) >= 0)[0]

  # Y generation: each region follows its own linear model.
  data_y[idx0] = data_x[idx0, 0] + 2 * data_x[idx0, 1]
  data_y[idx1] = 0.5 * data_x[idx1, 2] + 1 * data_x[idx1, 3] + \
      1 * data_x[idx1, 4] + 0.5 * data_x[idx1, 5]

  # Ground truth local dynamics (C) generation: nonzero entries mark the
  # features that drive y in each region.
  data_c[idx0, 0] = 1.0
  data_c[idx0, 1] = 2.0
  data_c[idx1, 2] = 0.5
  data_c[idx1, 3] = 1.0
  data_c[idx1, 4] = 1.0
  data_c[idx1, 5] = 0.5

  # Splits train/test sets
  x_train = data_x[:data_no, :]
  x_test = data_x[data_no:, :]
  y_train = data_y[:data_no]
  y_test = data_y[data_no:]
  c_test = data_c[data_no:, :]

  # Order of testing set index based on the distance from the boundary
  if data_name == 'Syn1':
    test_idx = np.argsort(np.abs(x_test[:, 9]))
  elif data_name == 'Syn2':
    test_idx = np.argsort(np.abs(x_test[:, 9] + np.exp(x_test[:, 10]) - 1))
  elif data_name == 'Syn3':
    test_idx = np.argsort(np.abs(x_test[:, 9] + np.power(x_test[:, 10], 3)))

  # Returns datasets
  return x_train, y_train, x_test, y_test, c_test, test_idx
| [
"numpy.random.normal",
"numpy.abs",
"numpy.power",
"numpy.where",
"numpy.exp",
"numpy.zeros",
"numpy.random.seed"
] | [((1207, 1227), 'numpy.random.seed', 'np.random.seed', (['seed'], {}), '(seed)\n', (1221, 1227), True, 'import numpy as np\n'), ((1239, 1280), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '[2 * data_no, 11]'], {}), '(0, 1, [2 * data_no, 11])\n', (1255, 1280), True, 'import numpy as np\n'), ((1350, 1373), 'numpy.zeros', 'np.zeros', (['[2 * data_no]'], {}), '([2 * data_no])\n', (1358, 1373), True, 'import numpy as np\n'), ((1386, 1413), 'numpy.zeros', 'np.zeros', (['[2 * data_no, 11]'], {}), '([2 * data_no, 11])\n', (1394, 1413), True, 'import numpy as np\n'), ((1476, 1502), 'numpy.where', 'np.where', (['(data_x[:, 9] < 0)'], {}), '(data_x[:, 9] < 0)\n', (1484, 1502), True, 'import numpy as np\n'), ((1517, 1544), 'numpy.where', 'np.where', (['(data_x[:, 9] >= 0)'], {}), '(data_x[:, 9] >= 0)\n', (1525, 1544), True, 'import numpy as np\n'), ((2565, 2585), 'numpy.abs', 'np.abs', (['x_test[:, 9]'], {}), '(x_test[:, 9])\n', (2571, 2585), True, 'import numpy as np\n'), ((1612, 1633), 'numpy.exp', 'np.exp', (['data_x[:, 10]'], {}), '(data_x[:, 10])\n', (1618, 1633), True, 'import numpy as np\n'), ((1677, 1698), 'numpy.exp', 'np.exp', (['data_x[:, 10]'], {}), '(data_x[:, 10])\n', (1683, 1698), True, 'import numpy as np\n'), ((2664, 2685), 'numpy.exp', 'np.exp', (['x_test[:, 10]'], {}), '(x_test[:, 10])\n', (2670, 2685), True, 'import numpy as np\n'), ((2769, 2795), 'numpy.power', 'np.power', (['x_test[:, 10]', '(3)'], {}), '(x_test[:, 10], 3)\n', (2777, 2795), True, 'import numpy as np\n'), ((1772, 1798), 'numpy.power', 'np.power', (['data_x[:, 10]', '(3)'], {}), '(data_x[:, 10], 3)\n', (1780, 1798), True, 'import numpy as np\n'), ((1842, 1868), 'numpy.power', 'np.power', (['data_x[:, 10]', '(3)'], {}), '(data_x[:, 10], 3)\n', (1850, 1868), True, 'import numpy as np\n')] |
# This code is the nmf_imaging.py adjusted for pyKLIP at https://bitbucket.org/pyKLIP/pyklip/src/master/pyklip/nmf_imaging.py
# Another version is kept at https://github.com/seawander/nmf_imaging/blob/master/nmf_imaging_for_pyKLIP.py
# Data imputation is not supported due to the input data structure of pyKLIP, since a 3D cube is needed.
from NonnegMFPy import nmf
import numpy as np
import os
from astropy.io import fits
def NMFcomponents(ref, ref_err = None, n_components = None, maxiters = 1e3, oneByOne = False, path_save = None):
    """Returns the NMF components, where the rows contain the information.

    Parameters
    ----------
    ref : ndarray
        reference images, shape (N, p) where N is the number of references and
        p is the number of pixels in each reference.
    ref_err : ndarray, optional
        uncertainty of ``ref`` (same shape). Defaults to ``sqrt(ref)``.
    n_components : int, optional
        number of components to build; capped at ``ref.shape[0]`` (default: all).
    maxiters : int, optional
        maximum number of NMF iterations per solve.
    oneByOne : bool, optional
        if True, build the components sequentially (1, 2, ..., n), seeding each
        fit with the previous solution.
    path_save : str, optional
        path prefix used to checkpoint the raw component/coefficient matrices to
        ``<path_save>_comp.fits`` / ``<path_save>_coef.fits`` so a later call can
        resume and add components. Only honored when ``oneByOne`` is True.

    Returns
    -------
    ndarray
        column-normalized NMF components, shape (n_components, p).
    """
    if ref_err is None:
        ref_err = np.sqrt(ref)

    if (n_components is None) or (n_components > ref.shape[0]):
        n_components = ref.shape[0]

    # clip negative fluxes; inflate the error of non-positive pixels so they carry ~no weight
    ref[ref < 0] = 0
    ref_err[ref <= 0] = np.nanpercentile(ref_err, 95) * 10

    ref_columnized = ref.T          # columnize: NonnegMFPy expects the information in columns
    ref_err_columnized = ref_err.T

    if not oneByOne:
        if path_save is not None:
            print('path_save is only supported when oneByOne == True.')
        g_img = nmf.NMF(ref_columnized, V=1.0 / ref_err_columnized ** 2, n_components=n_components)
        g_img.SolveNMF(maxiters=maxiters)
        return _normalize_columns(g_img.W).T

    print("Building components one by one...")
    if path_save is None:
        g_img = _grow_components(ref_columnized, ref_err_columnized, n_components, maxiters)
        return _normalize_columns(g_img.W).T

    print('\t path_save provided, you might want to load data and continue previous component calculation')
    print('\t\t loading from ' + path_save + '_comp.fits for components.')
    if not os.path.exists(path_save + '_comp.fits'):
        print('\t\t ' + path_save + '_comp.fits does not exist, calculating from scratch.')
        g_img = _grow_components(ref_columnized, ref_err_columnized, n_components, maxiters,
                                 path_save=path_save)
        return _normalize_columns(g_img.W).T

    W_assign = fits.getdata(path_save + '_comp.fits')
    H_assign = fits.getdata(path_save + '_coef.fits')
    if W_assign.shape[1] >= n_components:
        print('You have already had ' + str(W_assign.shape[1]) + ' components while asking for ' + str(n_components) + '. Returning to your input.')
        # BUG FIX: the original code then called the undefined ``decolumnize(...)`` with an
        # undefined ``mask`` (NameError); the stored components are simply returned instead.
        return _normalize_columns(W_assign).T

    print('You are asking for ' + str(n_components) + ' components. Building the rest based on the ' + str(W_assign.shape[1]) + ' provided.')
    g_img = _grow_components(ref_columnized, ref_err_columnized, n_components, maxiters,
                             start=W_assign.shape[1], W_seed=W_assign, H_seed=H_assign,
                             path_save=path_save)
    return _normalize_columns(g_img.W).T


def _normalize_columns(W):
    """Normalize each component (column) of ``W`` to unit L2 norm."""
    return W / np.sqrt(np.nansum(W ** 2, axis=0))


def _grow_components(ref_columnized, ref_err_columnized, n_components, maxiters,
                      start=0, W_seed=None, H_seed=None, path_save=None):
    """Sequential NMF: add one component at a time, seeding each fit with the previous solution.

    When ``W_seed``/``H_seed`` are given, the first ``start`` components are taken from them.
    When ``path_save`` is given, the raw W/H matrices are checkpointed after every step.
    Returns the final solved ``nmf.NMF`` object.
    """
    V = 1.0 / ref_err_columnized ** 2
    g_img = None
    W_prev, H_prev = W_seed, H_seed
    for i in range(start, n_components):
        print("\t" + str(i + 1) + " of " + str(n_components))
        n = i + 1
        if W_prev is None:
            # very first component: start from NonnegMFPy's default initialization
            g_img = nmf.NMF(ref_columnized, V=V, n_components=n)
        else:
            # seed the first n-1 components/coefficients with the previous solution,
            # and only the newest one with random values
            W_ini = np.random.rand(ref_columnized.shape[0], n)
            W_ini[:, :(n - 1)] = np.copy(W_prev)
            W_ini = np.array(W_ini, order='F')  # Fortran ordering, column elements contiguous in memory.
            H_ini = np.random.rand(n, ref_columnized.shape[1])
            H_ini[:(n - 1), :] = np.copy(H_prev)
            H_ini = np.array(H_ini, order='C')  # C ordering, row elements contiguous in memory.
            g_img = nmf.NMF(ref_columnized, V=V, W=W_ini, H=H_ini, n_components=n)
        g_img.SolveNMF(maxiters=maxiters)
        W_prev, H_prev = g_img.W, g_img.H
        if path_save is not None:
            print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D component matrix at ' + path_save + '_comp.fits')
            fits.writeto(path_save + '_comp.fits', g_img.W, overwrite = True)
            print('\t\t\t Calculation for ' + str(n) + ' components done, overwriting raw 2D coefficient matrix at ' + path_save + '_coef.fits')
            fits.writeto(path_save + '_coef.fits', g_img.H, overwrite = True)
    return g_img
def NMFmodelling(trg, components, n_components = None, trg_err = None, maxiters = 1e3, returnChi2 = False, projectionsOnly = False, coefsAlso = False, cube = False, trgThresh = 1.0):
    """Model the target as a non-negative combination of the NMF components.

    Parameters
    ----------
    trg : 1D array of p pixels.
    components : (N, p) array produced by ``NMFcomponents``.
    n_components : number of components to use (default: all of them).
    trg_err : uncertainty of ``trg``; defaults to ``sqrt(trg)``.
    maxiters : maximum number of NMF iterations.
    returnChi2 : if True, also return the fit chi-square.
    projectionsOnly : if True (non-cube mode), return the per-component projections.
    coefsAlso : if True, also return the fitted coefficients.
    cube : if True, fit with 1, 2, ..., n_components components in turn.
    trgThresh : pixels below this photon-count threshold are ignored (zeroed).

    Returns
    -------
    The flattened NMF model of the target (or the variants selected by the flags).
    """
    if n_components is None:
        n_components = components.shape[0]
    if trg_err is None:
        trg_err = np.sqrt(trg)

    # suppress low-signal pixels and assign them maximal uncertainty
    trg[trg < trgThresh] = 0
    trg_err[trg == 0] = np.nanpercentile(trg_err, 95) * 10

    # columnize and re-normalize the components so NonnegMFPy behaves correctly
    components_column = components.T
    components_column = components_column / np.sqrt(np.nansum(components_column ** 2, axis=0))

    # columnize the target and its uncertainty
    trg_column = np.zeros((trg.shape[0], 1))
    trg_column[:, 0] = trg
    trg_err_column = np.zeros((trg_err.shape[0], 1))
    trg_err_column[:, 0] = trg_err

    if cube:
        print("Building models one by one...")
        for i in range(n_components):
            print("\t" + str(i + 1) + " of " + str(n_components))
            trg_img = nmf.NMF(trg_column, V=1 / trg_err_column ** 2,
                              W=components_column[:, :i + 1], n_components=i + 1)
            chi2, time_used = trg_img.SolveNMF(H_only=True, maxiters=maxiters)
            coefs = trg_img.H
            model_column = np.dot(components_column[:, :i + 1], coefs)
    else:
        trg_img = nmf.NMF(trg_column, V=1 / trg_err_column ** 2,
                          W=components_column, n_components=n_components)
        chi2, time_used = trg_img.SolveNMF(H_only=True, maxiters=maxiters)
        coefs = trg_img.H
        if projectionsOnly:
            # individual projections rather than the summed model
            projections = (coefs.flatten() * components.T).T
            if coefsAlso:
                return projections, coefs
            return projections
        model_column = np.dot(components_column, coefs)

    if returnChi2:
        return model_column.T, chi2
    if coefsAlso:
        return model_column.T, coefs
    return model_column.T.flatten()
def NMFsubtraction(trg, model, frac = 1):
    """Subtract ``frac * model`` from ``trg``.

    A scalar ``frac`` yields a single flattened residual; an iterable of
    fractions yields one residual per fraction, stacked along a new first axis.
    """
    if np.ndim(frac) == 0:
        # scalar correction factor
        return (trg - model * frac).flatten()
    stacked = np.zeros((len(frac),) + model.shape)
    for k, fraction in enumerate(frac):
        stacked[k] = trg - model * fraction
    return stacked
def NMFbff(trg, model, fracs = None):
    """BFF subtraction.

    Scans candidate scaling fractions and, for each one, measures the scatter
    of the sigma-clipped residual ``trg - frac * model``; the fraction(s) with
    the minimal residual scatter are returned.
    """
    if fracs is None:
        fracs = np.arange(0.60, 1.001, 0.001)
    stds = np.zeros(fracs.shape)
    for idx, frac in enumerate(fracs):
        residual = trg - model * frac
        # iterative sigma clipping: reject outliers above median + 3*std
        # and below median - 10*std until either side has nothing left to clip
        while True:
            med = np.nanmedian(residual)
            sigma = np.nanstd(residual)
            if np.nansum(residual > med + 3 * sigma) == 0 or np.nansum(residual < med - 10 * sigma) == 0:
                break
            residual[residual > med + 3 * sigma] = np.nan
            # the low cut is intentionally re-evaluated on the already high-clipped data
            low_cut = np.nanmedian(residual) - 10 * np.nanstd(residual)
            residual[residual < low_cut] = np.nan
        stds[idx] = np.nanstd(residual)
    return fracs[np.where(stds == np.nanmin(stds))]
def nmf_math(sci, ref_psfs, sci_err = None, ref_psfs_err = None, componentNum = 5, maxiters = 1e5, oneByOne = True, trg_type = 'disk', path_save = None):
    """Main NMF function for high contrast imaging.

    Args:
        sci (1D array): target image, dimension: height * width.
        ref_psfs (2D array): reference cube, dimension: referenceNumber * height * width.
        sci_err, ref_psfs_err: uncertainty for sci and ref_psfs; if None, their square roots are used.
        componentNum (integer): number of NMF components to use (default 5); more components is slower.
        maxiters (integer): maximum number of iterations (default 1e5).
        oneByOne (boolean): construct the NMF components sequentially (default True).
        trg_type (string): 'disk'/'d' for circumstellar disks or 'planet'/'p' for planets;
            for planets the BFF scaling step is skipped.
        path_save (string): path prefix for checkpointing component calculations (default None).

    Returns:
        result (1D array): the final NMF subtraction result, flattened.
    """
    # zero out bad (NaN) pixels before modelling
    sci[np.isnan(sci)] = 0

    components = NMFcomponents(ref_psfs, ref_err=ref_psfs_err, n_components=componentNum,
                              maxiters=maxiters, oneByOne=oneByOne, path_save=path_save)
    model = NMFmodelling(trg=sci, components=components, n_components=componentNum,
                         trg_err=sci_err, maxiters=maxiters)

    # BFF scaling is only meaningful for extended structure (disks); planets keep frac = 1
    if trg_type in ('p', 'planet'):
        best_frac = 1
    elif trg_type in ('d', 'disk'):
        best_frac = NMFbff(trg=sci, model=model)

    result = NMFsubtraction(trg=sci, model=model, frac=best_frac)
    return result.flatten()
| [
"os.path.exists",
"numpy.copy",
"numpy.nanstd",
"numpy.sqrt",
"numpy.nanpercentile",
"numpy.random.rand",
"numpy.nanmedian",
"astropy.io.fits.writeto",
"numpy.asarray",
"NonnegMFPy.nmf.NMF",
"numpy.array",
"numpy.zeros",
"numpy.dot",
"numpy.isnan",
"astropy.io.fits.getdata",
"numpy.nan... | [((9410, 9437), 'numpy.zeros', 'np.zeros', (['(trg.shape[0], 1)'], {}), '((trg.shape[0], 1))\n', (9418, 9437), True, 'import numpy as np\n'), ((9486, 9517), 'numpy.zeros', 'np.zeros', (['(trg_err.shape[0], 1)'], {}), '((trg_err.shape[0], 1))\n', (9494, 9517), True, 'import numpy as np\n'), ((11382, 11403), 'numpy.zeros', 'np.zeros', (['fracs.shape'], {}), '(fracs.shape)\n', (11390, 11403), True, 'import numpy as np\n'), ((982, 994), 'numpy.sqrt', 'np.sqrt', (['ref'], {}), '(ref)\n', (989, 994), True, 'import numpy as np\n'), ((1150, 1179), 'numpy.nanpercentile', 'np.nanpercentile', (['ref_err', '(95)'], {}), '(ref_err, 95)\n', (1166, 1179), True, 'import numpy as np\n'), ((1621, 1709), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'n_components': 'n_components'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, n_components=\n n_components)\n', (1628, 1709), False, 'from NonnegMFPy import nmf\n'), ((8961, 8973), 'numpy.sqrt', 'np.sqrt', (['trg'], {}), '(trg)\n', (8968, 8973), True, 'import numpy as np\n'), ((9036, 9065), 'numpy.nanpercentile', 'np.nanpercentile', (['trg_err', '(95)'], {}), '(trg_err, 95)\n', (9052, 9065), True, 'import numpy as np\n'), ((9588, 9686), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['trg_column'], {'V': '(1 / trg_err_column ** 2)', 'W': 'components_column', 'n_components': 'n_components'}), '(trg_column, V=1 / trg_err_column ** 2, W=components_column,\n n_components=n_components)\n', (9595, 9686), False, 'from NonnegMFPy import nmf\n'), ((11331, 11359), 'numpy.arange', 'np.arange', (['(0.6)', '(1.001)', '(0.001)'], {}), '(0.6, 1.001, 0.001)\n', (11340, 11359), True, 'import numpy as np\n'), ((11916, 11937), 'numpy.nanstd', 'np.nanstd', (['data_slice'], {}), '(data_slice)\n', (11925, 11937), True, 'import numpy as np\n'), ((13318, 13331), 'numpy.isnan', 'np.isnan', (['sci'], {}), '(sci)\n', (13326, 13331), True, 'import numpy as np\n'), ((9236, 9277), 'numpy.nansum', 'np.nansum', 
(['(components_column ** 2)'], {'axis': '(0)'}), '(components_column ** 2, axis=0)\n', (9245, 9277), True, 'import numpy as np\n'), ((9902, 9934), 'numpy.dot', 'np.dot', (['components_column', 'coefs'], {}), '(components_column, coefs)\n', (9908, 9934), True, 'import numpy as np\n'), ((10363, 10466), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['trg_column'], {'V': '(1 / trg_err_column ** 2)', 'W': 'components_column[:, :i + 1]', 'n_components': '(i + 1)'}), '(trg_column, V=1 / trg_err_column ** 2, W=components_column[:, :i + \n 1], n_components=i + 1)\n', (10370, 10466), False, 'from NonnegMFPy import nmf\n'), ((10608, 10651), 'numpy.dot', 'np.dot', (['components_column[:, :i + 1]', 'coefs'], {}), '(components_column[:, :i + 1], coefs)\n', (10614, 10651), True, 'import numpy as np\n'), ((10930, 10946), 'numpy.asarray', 'np.asarray', (['frac'], {}), '(frac)\n', (10940, 10946), True, 'import numpy as np\n'), ((1805, 1836), 'numpy.nansum', 'np.nansum', (['(g_img.W ** 2)'], {'axis': '(0)'}), '(g_img.W ** 2, axis=0)\n', (1814, 1836), True, 'import numpy as np\n'), ((3280, 3320), 'os.path.exists', 'os.path.exists', (["(path_save + '_comp.fits')"], {}), "(path_save + '_comp.fits')\n", (3294, 3320), False, 'import os\n'), ((5079, 5117), 'astropy.io.fits.getdata', 'fits.getdata', (["(path_save + '_comp.fits')"], {}), "(path_save + '_comp.fits')\n", (5091, 5117), False, 'from astropy.io import fits\n'), ((5145, 5183), 'astropy.io.fits.getdata', 'fits.getdata', (["(path_save + '_coef.fits')"], {}), "(path_save + '_coef.fits')\n", (5157, 5183), False, 'from astropy.io import fits\n'), ((12009, 12029), 'numpy.nanmin', 'np.nanmin', (['std_infos'], {}), '(std_infos)\n', (12018, 12029), True, 'import numpy as np\n'), ((2156, 2228), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'n_components': 'n'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, n_components=n)\n', (2163, 2228), False, 'from NonnegMFPy import nmf\n'), ((2278, 2320), 
'numpy.random.rand', 'np.random.rand', (['ref_columnized.shape[0]', 'n'], {}), '(ref_columnized.shape[0], n)\n', (2292, 2320), True, 'import numpy as np\n'), ((2360, 2376), 'numpy.copy', 'np.copy', (['g_img.W'], {}), '(g_img.W)\n', (2367, 2376), True, 'import numpy as np\n'), ((2405, 2431), 'numpy.array', 'np.array', (['W_ini'], {'order': '"""F"""'}), "(W_ini, order='F')\n", (2413, 2431), True, 'import numpy as np\n'), ((2532, 2574), 'numpy.random.rand', 'np.random.rand', (['n', 'ref_columnized.shape[1]'], {}), '(n, ref_columnized.shape[1])\n', (2546, 2574), True, 'import numpy as np\n'), ((2614, 2630), 'numpy.copy', 'np.copy', (['g_img.H'], {}), '(g_img.H)\n', (2621, 2630), True, 'import numpy as np\n'), ((2659, 2685), 'numpy.array', 'np.array', (['H_ini'], {'order': '"""C"""'}), "(H_ini, order='C')\n", (2667, 2685), True, 'import numpy as np\n'), ((2777, 2871), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'W': 'W_ini', 'H': 'H_ini', 'n_components': 'n'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, W=W_ini, H=H_ini,\n n_components=n)\n', (2784, 2871), False, 'from NonnegMFPy import nmf\n'), ((4614, 4677), 'astropy.io.fits.writeto', 'fits.writeto', (["(path_save + '_comp.fits')", 'g_img.W'], {'overwrite': '(True)'}), "(path_save + '_comp.fits', g_img.W, overwrite=True)\n", (4626, 4677), False, 'from astropy.io import fits\n'), ((4853, 4916), 'astropy.io.fits.writeto', 'fits.writeto', (["(path_save + '_coef.fits')", 'g_img.H'], {'overwrite': '(True)'}), "(path_save + '_coef.fits', g_img.H, overwrite=True)\n", (4865, 4916), False, 'from astropy.io import fits\n'), ((2989, 3020), 'numpy.nansum', 'np.nansum', (['(g_img.W ** 2)'], {'axis': '(0)'}), '(g_img.W ** 2, axis=0)\n', (2998, 3020), True, 'import numpy as np\n'), ((3635, 3707), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'n_components': 'n'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, 
n_components=n)\n', (3642, 3707), False, 'from NonnegMFPy import nmf\n'), ((3765, 3807), 'numpy.random.rand', 'np.random.rand', (['ref_columnized.shape[0]', 'n'], {}), '(ref_columnized.shape[0], n)\n', (3779, 3807), True, 'import numpy as np\n'), ((3851, 3867), 'numpy.copy', 'np.copy', (['g_img.W'], {}), '(g_img.W)\n', (3858, 3867), True, 'import numpy as np\n'), ((3900, 3926), 'numpy.array', 'np.array', (['W_ini'], {'order': '"""F"""'}), "(W_ini, order='F')\n", (3908, 3926), True, 'import numpy as np\n'), ((4031, 4073), 'numpy.random.rand', 'np.random.rand', (['n', 'ref_columnized.shape[1]'], {}), '(n, ref_columnized.shape[1])\n', (4045, 4073), True, 'import numpy as np\n'), ((4117, 4133), 'numpy.copy', 'np.copy', (['g_img.H'], {}), '(g_img.H)\n', (4124, 4133), True, 'import numpy as np\n'), ((4166, 4192), 'numpy.array', 'np.array', (['H_ini'], {'order': '"""C"""'}), "(H_ini, order='C')\n", (4174, 4192), True, 'import numpy as np\n'), ((4288, 4382), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'W': 'W_ini', 'H': 'H_ini', 'n_components': 'n'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, W=W_ini, H=H_ini,\n n_components=n)\n', (4295, 4382), False, 'from NonnegMFPy import nmf\n'), ((7610, 7673), 'astropy.io.fits.writeto', 'fits.writeto', (["(path_save + '_comp.fits')", 'g_img.W'], {'overwrite': '(True)'}), "(path_save + '_comp.fits', g_img.W, overwrite=True)\n", (7622, 7673), False, 'from astropy.io import fits\n'), ((7857, 7920), 'astropy.io.fits.writeto', 'fits.writeto', (["(path_save + '_coef.fits')", 'g_img.H'], {'overwrite': '(True)'}), "(path_save + '_coef.fits', g_img.H, overwrite=True)\n", (7869, 7920), False, 'from astropy.io import fits\n'), ((11738, 11762), 'numpy.nanmedian', 'np.nanmedian', (['data_slice'], {}), '(data_slice)\n', (11750, 11762), True, 'import numpy as np\n'), ((11835, 11859), 'numpy.nanmedian', 'np.nanmedian', (['data_slice'], {}), '(data_slice)\n', (11847, 11859), True, 'import 
numpy as np\n'), ((4975, 5006), 'numpy.nansum', 'np.nansum', (['(g_img.W ** 2)'], {'axis': '(0)'}), '(g_img.W ** 2, axis=0)\n', (4984, 5006), True, 'import numpy as np\n'), ((5456, 5488), 'numpy.nansum', 'np.nansum', (['(W_assign ** 2)'], {'axis': '(0)'}), '(W_assign ** 2, axis=0)\n', (5465, 5488), True, 'import numpy as np\n'), ((6016, 6058), 'numpy.random.rand', 'np.random.rand', (['ref_columnized.shape[0]', 'n'], {}), '(ref_columnized.shape[0], n)\n', (6030, 6058), True, 'import numpy as np\n'), ((6106, 6123), 'numpy.copy', 'np.copy', (['W_assign'], {}), '(W_assign)\n', (6113, 6123), True, 'import numpy as np\n'), ((6160, 6186), 'numpy.array', 'np.array', (['W_ini'], {'order': '"""F"""'}), "(W_ini, order='F')\n", (6168, 6186), True, 'import numpy as np\n'), ((6295, 6337), 'numpy.random.rand', 'np.random.rand', (['n', 'ref_columnized.shape[1]'], {}), '(n, ref_columnized.shape[1])\n', (6309, 6337), True, 'import numpy as np\n'), ((6385, 6402), 'numpy.copy', 'np.copy', (['H_assign'], {}), '(H_assign)\n', (6392, 6402), True, 'import numpy as np\n'), ((6439, 6465), 'numpy.array', 'np.array', (['H_ini'], {'order': '"""C"""'}), "(H_ini, order='C')\n", (6447, 6465), True, 'import numpy as np\n'), ((6565, 6659), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'W': 'W_ini', 'H': 'H_ini', 'n_components': 'n'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, W=W_ini, H=H_ini,\n n_components=n)\n', (6572, 6659), False, 'from NonnegMFPy import nmf\n'), ((6725, 6767), 'numpy.random.rand', 'np.random.rand', (['ref_columnized.shape[0]', 'n'], {}), '(ref_columnized.shape[0], n)\n', (6739, 6767), True, 'import numpy as np\n'), ((6815, 6831), 'numpy.copy', 'np.copy', (['g_img.W'], {}), '(g_img.W)\n', (6822, 6831), True, 'import numpy as np\n'), ((6868, 6894), 'numpy.array', 'np.array', (['W_ini'], {'order': '"""F"""'}), "(W_ini, order='F')\n", (6876, 6894), True, 'import numpy as np\n'), ((7003, 7045), 'numpy.random.rand', 
'np.random.rand', (['n', 'ref_columnized.shape[1]'], {}), '(n, ref_columnized.shape[1])\n', (7017, 7045), True, 'import numpy as np\n'), ((7093, 7109), 'numpy.copy', 'np.copy', (['g_img.H'], {}), '(g_img.H)\n', (7100, 7109), True, 'import numpy as np\n'), ((7146, 7172), 'numpy.array', 'np.array', (['H_ini'], {'order': '"""C"""'}), "(H_ini, order='C')\n", (7154, 7172), True, 'import numpy as np\n'), ((7272, 7366), 'NonnegMFPy.nmf.NMF', 'nmf.NMF', (['ref_columnized'], {'V': '(1.0 / ref_err_columnized ** 2)', 'W': 'W_ini', 'H': 'H_ini', 'n_components': 'n'}), '(ref_columnized, V=1.0 / ref_err_columnized ** 2, W=W_ini, H=H_ini,\n n_components=n)\n', (7279, 7366), False, 'from NonnegMFPy import nmf\n'), ((11767, 11788), 'numpy.nanstd', 'np.nanstd', (['data_slice'], {}), '(data_slice)\n', (11776, 11788), True, 'import numpy as np\n'), ((11865, 11886), 'numpy.nanstd', 'np.nanstd', (['data_slice'], {}), '(data_slice)\n', (11874, 11886), True, 'import numpy as np\n'), ((7983, 8014), 'numpy.nansum', 'np.nansum', (['(g_img.W ** 2)'], {'axis': '(0)'}), '(g_img.W ** 2, axis=0)\n', (7992, 8014), True, 'import numpy as np\n'), ((11539, 11563), 'numpy.nanmedian', 'np.nanmedian', (['data_slice'], {}), '(data_slice)\n', (11551, 11563), True, 'import numpy as np\n'), ((11622, 11646), 'numpy.nanmedian', 'np.nanmedian', (['data_slice'], {}), '(data_slice)\n', (11634, 11646), True, 'import numpy as np\n'), ((11568, 11589), 'numpy.nanstd', 'np.nanstd', (['data_slice'], {}), '(data_slice)\n', (11577, 11589), True, 'import numpy as np\n'), ((11651, 11672), 'numpy.nanstd', 'np.nanstd', (['data_slice'], {}), '(data_slice)\n', (11660, 11672), True, 'import numpy as np\n')] |
import sys
import os
import numpy as np
import glob
import matplotlib.pyplot as plt
from matplotlib import cm
import cv2
import pickle
import pyqtgraph as pg
from moviepy.editor import VideoFileClip
import lane
import car
def process_frame(img):
    """Run the full lane-detection pipeline on one video frame and return an
    annotated image (lane polygon, curvature and lane-center offset text)."""
    global car

    car.newframe()                   # advance the frame counter, reset per-frame state
    car.process_frame_binary(img)    # combined thresholded binary image
    car.warper(car.binary_image)     # warp the binary image to a bird's-eye view
    if car.debuglevel > 0:
        plt.imsave("warped" + str(car.frame_number) + ".jpg", car.warped_image, cmap=cm.gray)

    # full search when either lane is lost, otherwise refine around the previous fit
    if car.left_lane.lost(car.frame_number) or car.right_lane.lost(car.frame_number):
        car.find_lanes()
    else:
        car.search_around_poly()
    car.calc_car_position()

    # paint the lane interior on a blank warped canvas, un-warp it, blend with the frame
    blank = np.zeros_like(car.warped_image).astype(np.uint8)
    overlay = np.dstack((blank, blank, blank))
    if (car.left_lane.allx is not None) and (car.right_lane.ally is not None):
        left_edge = np.array([np.transpose(np.vstack([car.left_lane.allx, car.left_lane.ally]))])
        right_edge = np.array([np.flipud(np.transpose(np.vstack([car.right_lane.allx, car.right_lane.ally])))])
        polygon = np.hstack((left_edge, right_edge))
        cv2.fillPoly(overlay, np.int_([polygon]), (0, 255, 0))
        unwarped = cv2.warpPerspective(overlay, car.Minv, (img.shape[1], img.shape[0]))
        result = cv2.addWeighted(car.undistorted_image, 1, unwarped, 0.3, 0)
    else:
        result = car.gray_image

    # overlay curvature and vehicle-offset annotations
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(result, 'Radius of curvature = {} m'.format((car.left_lane.curverad + car.right_lane.curverad) // 2),
                (10, 50), font, 1, (255, 255, 255), 2, cv2.LINE_AA)
    side = 'left' if car.distance_from_lane_center < 0 else 'right'
    cv2.putText(result, 'Vehicle is {:.2f}m {} of center'.format(abs(car.distance_from_lane_center), side),
                (10, 100), font, 1, (255, 255, 255), 2, cv2.LINE_AA)

    if car.debuglevel > 0:
        plt.imsave("frame" + str(car.frame_number) + ".jpg", result, cmap=cm.gray)
    return result
if __name__ == '__main__':
    # debuglevel values: see comment before Car's constructor
    debuglevel = 0
    # NOTE(review): this rebinds the name `car` from the imported module to a Car
    # instance; `process_frame`'s `global car` then refers to this instance. It
    # works, but shadowing the module name is fragile — consider renaming.
    car = car.Car( debuglevel )

    fname = 'project_video'
    output_video_path = '../output_images/{}.mp4'.format(fname)
    if car.debuglevel>0:
        # debug mode: process only the [f, t) second subclip and write it locally
        f=0
        t=1
        output_video_path = '{}{}-{}.mp4'.format(fname, f, t)
        mv = VideoFileClip('../{}.mp4'.format(fname)).subclip(f, t)
    else:
        mv = VideoFileClip('../{}.mp4'.format(fname))

    # run the lane-detection pipeline on every frame and write the annotated video
    clip = mv.fl_image(process_frame)
    clip.write_videofile(output_video_path, audio=False)
| [
"car.Car",
"numpy.dstack",
"car.newframe",
"car.left_lane.lost",
"car.search_around_poly",
"numpy.hstack",
"car.calc_car_position",
"cv2.addWeighted",
"car.process_frame_binary",
"car.find_lanes",
"cv2.warpPerspective",
"numpy.int_",
"numpy.vstack",
"car.right_lane.lost",
"numpy.zeros_li... | [((357, 371), 'car.newframe', 'car.newframe', ([], {}), '()\n', (369, 371), False, 'import car\n'), ((415, 444), 'car.process_frame_binary', 'car.process_frame_binary', (['img'], {}), '(img)\n', (439, 444), False, 'import car\n'), ((477, 505), 'car.warper', 'car.warper', (['car.binary_image'], {}), '(car.binary_image)\n', (487, 505), False, 'import car\n'), ((883, 906), 'car.calc_car_position', 'car.calc_car_position', ([], {}), '()\n', (904, 906), False, 'import car\n'), ((1035, 1079), 'numpy.dstack', 'np.dstack', (['(warp_zero, warp_zero, warp_zero)'], {}), '((warp_zero, warp_zero, warp_zero))\n', (1044, 1079), True, 'import numpy as np\n'), ((2666, 2685), 'car.Car', 'car.Car', (['debuglevel'], {}), '(debuglevel)\n', (2673, 2685), False, 'import car\n'), ((682, 718), 'car.left_lane.lost', 'car.left_lane.lost', (['car.frame_number'], {}), '(car.frame_number)\n', (700, 718), False, 'import car\n'), ((722, 759), 'car.right_lane.lost', 'car.right_lane.lost', (['car.frame_number'], {}), '(car.frame_number)\n', (741, 759), False, 'import car\n'), ((769, 785), 'car.find_lanes', 'car.find_lanes', ([], {}), '()\n', (783, 785), False, 'import car\n'), ((804, 828), 'car.search_around_poly', 'car.search_around_poly', ([], {}), '()\n', (826, 828), False, 'import car\n'), ((1567, 1599), 'numpy.hstack', 'np.hstack', (['(pts_left, pts_right)'], {}), '((pts_left, pts_right))\n', (1576, 1599), True, 'import numpy as np\n'), ((1680, 1751), 'cv2.warpPerspective', 'cv2.warpPerspective', (['color_warp', 'car.Minv', '(img.shape[1], img.shape[0])'], {}), '(color_warp, car.Minv, (img.shape[1], img.shape[0]))\n', (1699, 1751), False, 'import cv2\n'), ((1822, 1880), 'cv2.addWeighted', 'cv2.addWeighted', (['car.undistorted_image', '(1)', 'newwarp', '(0.3)', '(0)'], {}), '(car.undistorted_image, 1, newwarp, 0.3, 0)\n', (1837, 1880), False, 'import cv2\n'), ((969, 1000), 'numpy.zeros_like', 'np.zeros_like', (['car.warped_image'], {}), '(car.warped_image)\n', (982, 1000), 
True, 'import numpy as np\n'), ((1633, 1647), 'numpy.int_', 'np.int_', (['[pts]'], {}), '([pts])\n', (1640, 1647), True, 'import numpy as np\n'), ((1387, 1438), 'numpy.vstack', 'np.vstack', (['[car.left_lane.allx, car.left_lane.ally]'], {}), '([car.left_lane.allx, car.left_lane.ally])\n', (1396, 1438), True, 'import numpy as np\n'), ((1495, 1548), 'numpy.vstack', 'np.vstack', (['[car.right_lane.allx, car.right_lane.ally]'], {}), '([car.right_lane.allx, car.right_lane.ally])\n', (1504, 1548), True, 'import numpy as np\n')] |
import pickle
import warnings
from copy import deepcopy
from typing import Iterable, Optional, Union
import numpy as np
import pandas as pd
from copulae.core import is_psd, near_psd
from copulae.types import Array
from muarch.calibrate import calibrate_data
from muarch.funcs import get_annualized_kurtosis, get_annualized_mean, get_annualized_sd, get_annualized_skew
__all__ = ['OptData', 'alter_frequency', 'calibrate_data', 'coalesce_covariance_matrix', 'translate_frequency']
# noinspection PyMissingConstructor
class OptData(np.ndarray):
"""
Returns an OptData class which is an enhancement of ndarray with helper methods. Most of the methods that can be
applied to numpy arrays can be applied to an instance of this object too. By default, any index
"""
def __new__(cls, data: np.ndarray, time_unit='monthly'):
obj = np.asarray(data).view(cls)
return obj
def __init__(self, data: np.ndarray, time_unit='monthly'):
"""
Returns an OptData class which is an enhancement of ndarray with helper methods
Parameters
----------
data: ndarray
3D tensor where the first axis represents the time period, the second axis represents the trials
and the third axis represents the assets
time_unit: {int, 'monthly', 'quarterly', 'semi-annually', 'yearly'}, optional
Specifies how many units (first axis) is required to represent a year. For example, if each time period
represents a month, set this to 12. If quarterly, set to 4. Defaults to 12 which means 1 period represents
a month. Alternatively, specify one of 'monthly', 'quarterly', 'semi-annually' or 'yearly'
"""
assert data.ndim == 3, "Data must be 3 dimensional with shape like (t, n, a) where `t` represents the time " \
"periods, `n` represents the trials and `a` represents the assets"
periods, trials, n_assets = data.shape
self.time_unit = translate_frequency(time_unit)
# empirical covariance taken along the time-asset axis then averaged by trials
# annualized data
a = (data + 1).reshape(periods // self.time_unit, self.time_unit, trials, n_assets).prod(1) - 1
cov_mat = np.mean([np.cov(a[i].T) for i in range(periods // self.time_unit)], 0)
cov_mat = near_psd(cov_mat)
assert is_psd(cov_mat), "covariance matrix must be positive semi-definite"
if np.allclose(np.diag(cov_mat), 1) and np.alltrue(np.abs(cov_mat) <= 1):
warnings.warn("The covariance matrix feels like a correlation matrix. Are you sure it's correct?")
self.n_years = periods / self.time_unit
self.n_assets = n_assets
# minor optimization when using rebalanced optimization. This is essentially a cache
self._unrebalanced_returns_data: Optional[np.ndarray] = None
self._cov_mat = cov_mat
def __array_finalize__(self, obj):
if obj is None:
return
self._cov_mat = getattr(obj, 'cov_mat', None)
self.time_unit = getattr(obj, 'time_unit', 12)
self.n_years = getattr(obj, 'n_years', 0)
self.n_assets = getattr(obj, 'n_assets', 0)
self._unrebalanced_returns_data = getattr(obj, '_unrebalanced_returns_data', None)
    def aggregate_assets(self, w: Iterable[float], columns: Optional[Iterable[float]] = None, copy=True):
        """
        Aggregates the asset columns with the weights given

        The column index (3rd axis) specifies which columns to aggregate. The aggregated column will be the first
        column.

        Parameters
        ----------
        w: iterable float
            The weights to aggregate the columns by. Weights do not have to sum to 1, if it needs to, you should check
            it prior
        columns: iterable int, optional
            The column index of the aggregated data. If not specified, method will aggregate the first :code:`n` columns
            where :math:`n` is the length of :code:`w`
        copy : boolean, optional
            If True, returns a copy of the :class:`OptData`. If False, returns a slice of the original
            :class:`OptData`. This means any change made to the :class:`OptData` will affect the original
            :class:`OptData` instance as it is not a copy.

        Returns
        -------
        OptData
            An instance of :class:`OptData` with the aggregated columns

        Examples
        --------
        If we have a (60 x 1000 x 10) data and we want to aggregate the assets the first 3 indexes,

        >>> from allopy import OptData
        >>> import numpy as np
        >>>
        >>> np.random.seed(8888)
        >>> data = OptData(np.random.standard_normal((60, 1000, 10)))
        >>> data.aggregate_assets([0.3, 0.4, 0.3]).shape
        >>>
        >>> # Alternatively, we can specify the indices directly. Let's assume we want to aggregate indices [4, 1, 6]
        >>> data = OptData(np.random.standard_normal((60, 1000, 10)))
        >>> data.aggregate_assets([0.3, 0.4, 0.3], [4, 1, 6]).shape
        """
        w = np.asarray(w)
        assert w.ndim == 1 and w.size != 0, "`w` must be a non-empty 1D vector"
        if columns is not None:
            columns = np.asarray(columns)
            assert columns.shape == w.shape, "columns must be a 1D integer vector with the same shape as `w`"
        else:
            # no explicit indices: aggregate the first len(w) asset columns
            columns = np.arange(len(w))
        # weighted combination collapses the selected assets into one series per (t, trial)
        agg = self[..., columns] @ w
        mask = [i not in columns for i in range(self.n_assets)]  # columns that have not changed
        # aggregated column goes first, untouched columns follow in their original order
        data = OptData(np.concatenate([agg[..., None], self[..., mask]], axis=2), time_unit=self.time_unit)
        # the covariance matrix must be collapsed consistently with the data columns
        data.cov_mat = coalesce_covariance_matrix(self.cov_mat, w, columns)
        return data.copy() if copy else data
    def alter_frequency(self, to: Union[int, str]):
        """
        Coalesces a the 3D tensor to a lower frequency.

        For example, if we had a 10000 simulations of 10 year, monthly returns for 30 asset classes,
        we would originally have a 120 x 10000 x 30 tensor. If we want to collapse this
        to a quarterly returns tensor, the resulting tensor's shape would be 40 x 10000 x 30

        Note that we can only coalesce data from a higher frequency to lower frequency.

        Parameters
        ----------
        to: {int, 'month', 'quarter', 'year'}, optional
            The targeted frequency. If a string is passed in, it must be one of ('month', 'quarter', 'year').
            If an integer is passed in, this value should be the number of units in a year

        Returns
        -------
        OptData
            A :class:`OptData` with lower frequency

        Example
        -------
        >>> import numpy as np
        >>> from allopy import OptData
        >>>
        >>> np.random.seed(8888)
        >>> data = np.random.standard_normal((120, 10000, 7))
        >>> data = OptData(data)
        >>> new_data = data.alter_frequency('quarter')  # changes to new_data will affect data
        >>> print(new_data.shape)
        >>>
        >>> # making copies, changes to new_data will not affect data
        >>> new_data = data.alter_frequency(4)  # this is equivalent of month to year
        """
        # delegates to the module-level alter_frequency(); np.asarray strips the
        # OptData metadata so the helper works on a plain tensor
        data = alter_frequency(np.asarray(self), self.time_unit, to)
        return OptData(data, to)  # Cast as OptData
def calibrate_data(self, mean: Optional[Iterable[float]] = None, sd: Optional[Iterable[float]] = None,
inplace=False):
"""
Calibrates the data given the target mean and standard deviation.
Parameters
----------
mean: iterable float, optional
the targeted mean vector
sd: iterable float, optional
the targeted float vector
inplace: boolean, optional
If True, the :class:`OptData` is modified inplace. This means that the underlying :class:`OptData`
is changed. If False, a new instance of :class:`OptData` is returned
Returns
-------
OptData
an instance of :class:`OptData`
"""
if inplace:
return calibrate_data(self, mean, sd, self.time_unit, True)
return OptData(calibrate_data(self, mean, sd, self.time_unit), self.time_unit)
@property
def cov_mat(self):
"""Returns the covariance matrix of the OptData"""
return self._cov_mat
@cov_mat.setter
def cov_mat(self, cov_mat: np.ndarray):
cov = np.asarray(cov_mat)
ideal_shape = (self.n_assets, self.n_assets)
assert cov.shape == ideal_shape, f"covariance matrix should have shape {ideal_shape}"
self._cov_mat = cov_mat
def cut_by_horizon(self, years: float, copy=True):
"""
Returns the :class:`OptData` with the number of years specified.
For example, given that you have a (120 x ...) :class:`OptData` and each time unit represents a month.
If the first 5 years is required, this method will return a (60 x ...) :class:`OptData`.
Parameters
----------
years: float
number of years
copy: boolean, optional
If True, returns a copy of the :class:`OptData`. If False, returns a slice of the original :class:`OptData`.
This means any change made to the :class:`OptData` will affect the original :class:`OptData` instance
as it is not a copy.
Returns
-------
OptData
A new instance of the cut :class:`OptData`
"""
limit = int(years * self.time_unit)
data = deepcopy(self)[:limit] if copy else self[:limit]
data.n_years = years
return data
def cvar(self, w: Array, rebalance: bool, percentile=5.0):
r"""
Calculates the CVaR given the weights.
CVaR is calculated as the mean of returns that is below the percentile specified. Technically, it is the
expected shortfall. The CVaR is given by:
.. math::
ES_\alpha = \frac{\sum_{i \in \mathbf{r}} \mathbb{1}_\alpha (r_i) \cdot r_i}
{\sum_{i \in \mathbf{r}} \mathbb{1}_\alpha (r_i)}
where :math:`\alpha` is the percentile cutoff, :math:`\mathbf{r}` is the geometric returns vector and
:math:`\mathbb{1}_\alpha` is an indicator function specifying if the returns instance, :math:`r_i` is below
the :math:`alpha` cutoff
Parameters
----------
w: {iterable float, ndarray}
Portfolio weights
rebalance: bool
Whether portfolio is rebalanced every time period
percentile: float, default 5.0
The percentile to cutoff for CVaR calculation
Returns
-------
float
CVaR of the portfolio
See Also
--------
:py:meth:`.portfolio_returns` : Portfolio returns
"""
assert 0 <= percentile <= 100, "Percentile must be a number between [0, 100]"
w = _format_weights(w, self)
returns = self.portfolio_returns(w, rebalance)
cutoff = np.percentile(returns, percentile)
return float(returns[returns <= cutoff].mean())
    def expected_return(self, w: Array, rebalance: bool):
        r"""
        Calculates the annualized expected return given a weight vector

        The expected annualized returns is given by

        .. math::

            \mu_R = \frac{1}{N} \sum^N_i {r_i^{1/y} - 1}

        where :math:`r` is an instance of the geometric returns vector and :math:`y` is the number of years.

        Parameters
        ----------
        w: {iterable float, ndarray}
            Portfolio weights
        rebalance: bool
            Whether portfolio is rebalanced every time period

        Returns
        -------
        float
            Annualized return

        See Also
        --------
        :py:meth:`.portfolio_returns` : Portfolio returns
        """
        w = _format_weights(w, self)
        returns = self.portfolio_returns(w, rebalance) + 1
        # sign(x) * |x| ** (1/y) takes the y-th root while tolerating negative gross
        # returns, for which a fractional power would produce NaN/complex values
        return (np.sign(returns) * np.abs(returns) ** (1 / self.n_years)).mean() - 1
def sharpe_ratio(self, w: Array, rebalance: bool) -> float:
r"""
Calculates the portfolio sharpe ratio.
The formula for the sharpe ratio is given by:
.. math::
SR = \frac{\mu_R}{\sigma_R}
Parameters
----------
w: {iterable float, ndarray}
Portfolio weights
rebalance: bool
Whether portfolio is rebalanced every time period
Returns
-------
float
Portfolio sharpe ratio
See Also
--------
:py:meth:`.expected_return` : Expected returns
:py:meth:`.portfolio_returns` : Portfolio returns
:py:meth:`.volatility` : Volatility
"""
w = _format_weights(w, self)
e = 1e6 * self.expected_return(w, rebalance) # added scale for numerical stability during optimization
v = 1e6 * self.volatility(w)
return e / v
def volatility(self, w: Array) -> float:
r"""
Calculates the volatility of the portfolio given a weight vector. The volatility is given by:
.. math::
\mathbf{w} \cdot \Sigma \cdot \mathbf{w^T}
where :math:`\mathbf{w}` is the weight vector and :math:`\Sigma` is the asset covariance matrix
Parameters
----------
w: {iterable float, ndarray}
Portfolio weights
Returns
-------
float
Portfolio volatility
"""
w = _format_weights(w, self)
return float(w.T @ self.cov_mat @ w) ** 0.5
    def portfolio_returns(self, w: Array, rebalance: bool) -> np.ndarray:
        r"""
        Calculates the vector of geometric returns of the portfolio for every trial in the simulation.

        The simulated returns is a 3D tensor. If there is rebalancing, then the geometric returns for each trial
        is given by:

        .. math::

            r_i = \prod^T (\mathbf{R_i} \cdot \mathbf{w} + 1) \forall i \in \{ 1, \dots, N \}

        Otherwise, if there is no rebalancing:

        .. math::

            r_i = (\prod^T (\mathbf{R_i} + 1) - 1) \cdot \mathbf{w} \forall i \in \{ 1, \dots, N \}

        where :math:`r_i` is the geometric returns for trial :math:`i`, :math:`T` is the total time period,
        :math:`\mathbf{R_i}` is the returns matrix for trial :math:`i`, :math:`\mathbf{w}` is the weights vector
        and :math:`N` is the total number of trials.

        Parameters
        ----------
        w: array_like
            Portfolio weights
        rebalance: bool
            Whether portfolio is rebalanced every time period

        Returns
        -------
        ndarray
            vector of portfolio returns
        """
        if rebalance:
            # weights applied per period, then compounded along the time axis
            return (self @ w + 1).prod(0) - 1
        else:
            # per-asset compounded returns do not depend on w, so they are computed
            # once and cached for subsequent calls with different weights
            if self._unrebalanced_returns_data is None:  # cache this calculation
                self._unrebalanced_returns_data = np.asarray((self + 1).prod(0) - 1)
            return self._unrebalanced_returns_data @ w
def set_cov_mat(self, cov_mat: np.ndarray):
"""
Sets the covariance matrix
Parameters
----------
cov_mat: ndarray
Asset covariance matrix
Returns
-------
OptData
Own OptData instance
"""
cov = np.asarray(cov_mat)
ideal_shape = (self.n_assets, self.n_assets)
assert cov.shape == ideal_shape, f"covariance matrix should have shape {ideal_shape}"
self._cov_mat = cov_mat
return self
    @property
    def statistics(self):
        """
        Returns the statistics (4 moments) of the cube

        Returns
        -------
        DataFrame
            The first 4 moments of the cube for each asset (last axis)
        """
        # helpers (from muarch.funcs) compute each moment per asset; presumably they
        # annualize along the time axis using `time_unit` — verify against muarch docs
        return pd.DataFrame({
            "Mean": get_annualized_mean(self, self.time_unit),
            "SD": get_annualized_sd(self, self.time_unit),
            "Skew": get_annualized_skew(self, self.time_unit),
            "Kurt": get_annualized_kurtosis(self, self.time_unit),
        })
def take_assets(self, start: int, stop: Optional[int] = None):
"""
Returns a new :code:`OptData` instance from the specified start and stop index
Parameters
----------
start: int
Starting index. If the stop index is not specified, the start index will be 0 and this value will become
the stop index. Akin to the :code:`range` function.
stop: int
Stopping index
Returns
-------
OptData
A new OptData instance.
"""
if stop is None:
start, stop = 0, start
assert isinstance(start, int) and isinstance(stop, int), "Indices must be integers"
assert start < stop, "Start index must be less or equal to stop index"
if start == stop:
stop += 1
data: OptData = deepcopy(self)
data = data[..., start:stop]
data.n_assets = stop - start
data.cov_mat = data.cov_mat[start:stop, start:stop]
return data
    def to_pickle(self, path: str):
        """
        Saves the OptData object as a pickle file

        Parameters
        ----------
        path: str
            file path of the pickle file
        """
        # __reduce__/__setstate__ ensure the OptData metadata survives pickling
        with open(path, 'wb') as f:
            pickle.dump(self, f)
    def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
        """
        Overwrote the default :code:`ufunc` function so that we can get :class:`OptData` if calculated data is
        3 dimensional, :class:`float` if calculated data has 0 dimension or is 1D and len 1 and :class:`ndarray`
        otherwise (2D or >= 4D).

        Parameters
        ----------
        ufunc:
            The :code:`ufunc` object that was called.
        method: { '__call__', 'reduce', 'reduceat', 'accumulate', 'outer', 'inner' }
            A string indicating which :code:`ufunc` method was called
        inputs: tuple
            tuple of the input arguments to the :code:`ufunc`.
        kwargs: keyword arguments
            is a dictionary containing the optional input arguments of the :code:`ufunc`.. If given, any out arguments,
            both positional and keyword, are passed as a tuple in kwargs

        Returns
        -------
        {float, ndarray, OptData}
            Depending on the shape of calculated data, will return 1 of float, ndarray or OptData
        """
        # downcast OptData inputs to plain ndarrays so numpy does the real work,
        # remembering which positions held OptData instances
        args = []
        in_no = []
        for i, input_ in enumerate(inputs):
            if isinstance(input_, OptData):
                in_no.append(i)
                args.append(input_.view(np.ndarray))
            else:
                args.append(input_)
        # same downcasting for any explicit `out=` arrays
        outputs = kwargs.pop('out', None)
        out_no = []
        if outputs:
            out_args = []
            for j, output in enumerate(outputs):
                if isinstance(output, OptData):
                    out_no.append(j)
                    out_args.append(output.view(np.ndarray))
                else:
                    out_args.append(output)
            kwargs['out'] = tuple(out_args)
        else:
            outputs = (None,) * ufunc.nout
        info = {}
        if in_no:
            info['inputs'] = in_no
        if out_no:
            info['outputs'] = out_no
        # delegate the actual computation to ndarray's implementation
        results = super(OptData, self).__array_ufunc__(ufunc, method, *args, **kwargs)
        if results is NotImplemented:
            return NotImplemented
        if method == 'at':
            # in-place 'at' returns nothing; attach bookkeeping info to the target instead
            if isinstance(inputs[0], OptData):
                inputs[0].info = info
            return
        if ufunc.nout == 1:
            results = (results,)
        # re-wrap computed outputs as OptData; explicit `out=` arrays are returned as-is
        results = tuple((np.asarray(result).view(OptData)
                         if output is None else output)
                        for result, output in zip(results, outputs))
        results = results[0] if len(results) == 1 else results
        if isinstance(results, OptData):
            # NOTE(review): float() on a 1D result with more than one element would
            # raise a TypeError — presumably multi-element 1D results never occur here;
            # confirm against callers
            if results.ndim in (0, 1) or len(results) == 1:
                return float(results)
            if results.ndim != 3:
                return np.asarray(results)
        return results
    def __reduce__(self):
        """
        This function is used to formulate data that will be sent for pickling.

        The end result is the actual blob that will be pickled. Added the class properties explicitly as these are not
        passed into pickle by default

        Returns
        -------
        tuple
            Tuple object containing data to be sent for pickling
        """
        *state, meta = super().__reduce__()
        # append the OptData-specific metadata to ndarray's pickle state; the mirror
        # logic lives in __setstate__, which pops this extra element back off
        meta = meta + ({
            'cov_mat': self._cov_mat,
            'n_years': self.n_years,
            'n_assets': self.n_assets,
            'time_unit': self.time_unit,
            '_unrebalanced_returns_data': self._unrebalanced_returns_data
        },)
        return (*state, meta)
    def __setstate__(self, state, *args, **kwargs):
        """
        This function is used to recover the class instance from the pickle object. It is called by pickle by default.

        Parameters
        ----------
        state: tuple of objects
            This state provided is the primary data that will be used to recover the class object
        args
            arguments
        kwargs
            keyword arguments
        """
        meta = state[-1]  # metadata dict appended by __reduce__
        # n_assets must be restored before cov_mat: the cov_mat setter asserts the
        # matrix shape against (n_assets, n_assets)
        self.n_assets = meta['n_assets']
        self.cov_mat = meta['cov_mat']
        self.n_years = meta['n_years']
        self.time_unit = meta['time_unit']
        self._unrebalanced_returns_data = meta['_unrebalanced_returns_data']
        # hand the remaining (ndarray) state back to numpy
        super(OptData, self).__setstate__(state[:-1], *args, **kwargs)
def alter_frequency(data, from_='month', to_='quarter'):
    """
    Coalesces a the 3D tensor to a lower frequency.

    For example, if we had a 10000 simulations of 10 year, monthly returns for 30 asset classes,
    we would originally have a 120 x 10000 x 30 tensor. If we want to collapse this
    to a quarterly returns tensor, the resulting tensor's shape would be 40 x 10000 x 30

    Note that we can only coalesce data from a higher frequency to lower frequency.

    Parameters
    ----------
    data: ndarray
        The 3-dimension simulation tensor. The data's dimensions must be in time, trials, asset.
    from_: {int, 'month', 'quarter', 'year'}, optional
        The starting frequency. If a string is passed in, it must be one of ('month', 'quarter', 'year').
        If an integer is passed in, this value should be the number of units in a year. Thus, if moving
        from monthly data to quarterly data, this argument should be 12
    to_: {int, 'month', 'quarter', 'year'}, optional
        The targeted frequency. If a string is passed in, it must be one of ('month', 'quarter', 'year').
        If an integer is passed in, this value should be the number of units in a year. Thus, if moving
        from monthly data to quarterly data, this argument should be 4

    Returns
    -------
    OptData
        A :class:`OptData` with lower frequency

    Example
    -------
    >>> import numpy as np
    >>> from allopy.opt_data import alter_frequency
    >>>
    >>> np.random.seed(8888)
    >>> data = np.random.standard_normal((120, 10000, 7))
    >>> new_data = alter_frequency(data, 'month', 'quarter')
    >>> print(new_data.shape)
    >>>
    >>> # making copies, changes to new_data will not affect data
    >>> new_data = alter_frequency(data, 12, 4)  # this is equivalent of month to quarter
    """
    # type check and convert strings to integers
    to_ = translate_frequency(to_)
    from_ = translate_frequency(from_)
    if to_ == from_:
        return data
    assert from_ > to_, "Cannot extend data from lower to higher frequency. For example, we " \
                        "cannot go from yearly data to monthly data. How to fill anything in between?"
    t, n, s = data.shape
    # target number of periods: t scaled by the ratio of the two frequencies
    new_t = t / from_ * to_
    assert new_t.is_integer(), f"cannot convert {t} periods to {new_t} periods. Targeted periods must be an integer"
    new_t = int(new_t)
    # group each run of t // new_t consecutive periods and compound them
    # geometrically: prod(1 + r) - 1 along the grouped axis
    return (data.reshape((new_t, t // new_t, n, s)) + 1).prod(1) - 1  # reshape data
def coalesce_covariance_matrix(cov,
                               w: Iterable[float],
                               indices: Optional[Iterable[int]] = None) -> Union[np.ndarray, float]:
    """
    Aggregates the covariance with the weights given at the indices specified

    The aggregated column will be the first column.

    Parameters
    ----------
    cov: ndarray
        Covariance matrix of the portfolio
    w: ndarray
        The weights to aggregate the columns by. Weights do not have to sum to 1, if it needs to, you should check
        it prior
    indices: iterable int, optional
        The column index of the aggregated data. If not specified, method will aggregate the first 'n' columns
        where 'n' is the length of :code:`w`

    Returns
    -------
    ndarray
        Aggregated covariance matrix (or a float when everything collapses to a scalar)

    Examples
    --------
    If we have a (60 x 1000 x 10) data and we want to aggregate the assets the first 3 indexes,

    >>> from allopy.opt_data import coalesce_covariance_matrix
    >>> import numpy as np

    form covariance matrix

    >>> np.random.seed(8888)
    >>> cov = np.random.standard_normal((5, 5))
    >>> cov = cov @ cov.T

    coalesce first and second column where contribution is (30%, 70%) respectively.
    Does not have to sum to 1

    >>> coalesce_covariance_matrix(cov, (0.3, 0.7))

    coalesce fourth and fifth column

    >>> coalesce_covariance_matrix(cov, (0.2, 0.4), (3, 4))
    """
    weights = np.asarray(w)
    matrix = np.asarray(cov)
    n = len(weights)
    assert matrix.ndim == 2 and matrix.shape[0] == matrix.shape[1], 'cov must be a square matrix'
    assert n <= len(matrix), 'adjustment weights cannot be larger than the covariance matrix'
    n_assets = matrix.shape[1]
    if indices is None:
        indices = np.arange(n)
    # reorder so the aggregated columns come first, followed by the untouched
    # columns in their original relative order
    remainder = sorted(set(range(n_assets)) - set(indices))
    order = [*indices, *remainder]
    reordered = matrix[order][:, order]
    # transform matrix: the first row folds the weighted block into a single
    # asset, the rest is an identity over the remaining assets
    transform = np.zeros((n_assets - n + 1, n_assets))
    transform[0, :n] = weights
    transform[1:, n:] = np.eye(n_assets - n)
    result = transform @ reordered @ transform.T
    # a 1x1 result collapses to a plain float; otherwise nudge to positive semi-definite
    return float(result) if result.size == 1 else near_psd(result)
def translate_frequency(_freq: Union[str, int]) -> int:
    """Translates a given frequency to the integer equivalent with checks"""
    if isinstance(_freq, str):
        # alias table mapping every accepted spelling to its periods-per-year value
        aliases = {
            **dict.fromkeys(('m', 'month', 'monthly'), 12),
            **dict.fromkeys(('s', 'semi-annual', 'semi-annually'), 6),
            **dict.fromkeys(('q', 'quarter', 'quarterly'), 4),
            **dict.fromkeys(('y', 'a', 'year', 'annual', 'yearly', 'annually'), 1),
        }
        value = aliases.get(_freq.lower())
        if value is None:
            raise ValueError(f'unknown frequency {_freq}. Use one of month, semi-annual, quarter or annual')
        return value
    # integers pass through untouched, but must be positive
    assert isinstance(_freq, int) and _freq > 0, 'frequency can only be a positive integer or a string name'
    return _freq
def _format_weights(w, data: OptData) -> np.ndarray:
    """Flattens the weight input and checks that it matches the data's asset count"""
    weights = np.asarray(w).ravel()
    assert len(weights) == data.n_assets, f'input weights should have {data.n_assets} elements'
    return weights
| [
"muarch.calibrate.calibrate_data",
"copy.deepcopy",
"copulae.core.near_psd",
"numpy.cov",
"numpy.arange",
"numpy.asarray",
"copulae.core.is_psd",
"numpy.concatenate",
"warnings.warn",
"muarch.funcs.get_annualized_sd",
"muarch.funcs.get_annualized_mean",
"numpy.abs",
"numpy.eye",
"numpy.sig... | [((25934, 25947), 'numpy.asarray', 'np.asarray', (['w'], {}), '(w)\n', (25944, 25947), True, 'import numpy as np\n'), ((25958, 25973), 'numpy.asarray', 'np.asarray', (['cov'], {}), '(cov)\n', (25968, 25973), True, 'import numpy as np\n'), ((26320, 26344), 'numpy.zeros', 'np.zeros', (['(a - n + 1, a)'], {}), '((a - n + 1, a))\n', (26328, 26344), True, 'import numpy as np\n'), ((26378, 26391), 'numpy.eye', 'np.eye', (['(a - n)'], {}), '(a - n)\n', (26384, 26391), True, 'import numpy as np\n'), ((27696, 27707), 'numpy.ravel', 'np.ravel', (['w'], {}), '(w)\n', (27704, 27707), True, 'import numpy as np\n'), ((2373, 2390), 'copulae.core.near_psd', 'near_psd', (['cov_mat'], {}), '(cov_mat)\n', (2381, 2390), False, 'from copulae.core import is_psd, near_psd\n'), ((2406, 2421), 'copulae.core.is_psd', 'is_psd', (['cov_mat'], {}), '(cov_mat)\n', (2412, 2421), False, 'from copulae.core import is_psd, near_psd\n'), ((5155, 5168), 'numpy.asarray', 'np.asarray', (['w'], {}), '(w)\n', (5165, 5168), True, 'import numpy as np\n'), ((8560, 8579), 'numpy.asarray', 'np.asarray', (['cov_mat'], {}), '(cov_mat)\n', (8570, 8579), True, 'import numpy as np\n'), ((11166, 11200), 'numpy.percentile', 'np.percentile', (['returns', 'percentile'], {}), '(returns, percentile)\n', (11179, 11200), True, 'import numpy as np\n'), ((15528, 15547), 'numpy.asarray', 'np.asarray', (['cov_mat'], {}), '(cov_mat)\n', (15538, 15547), True, 'import numpy as np\n'), ((17139, 17153), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (17147, 17153), False, 'from copy import deepcopy\n'), ((26213, 26225), 'numpy.arange', 'np.arange', (['n'], {}), '(n)\n', (26222, 26225), True, 'import numpy as np\n'), ((26737, 26750), 'copulae.core.near_psd', 'near_psd', (['cov'], {}), '(cov)\n', (26745, 26750), False, 'from copulae.core import is_psd, near_psd\n'), ((2569, 2677), 'warnings.warn', 'warnings.warn', (['"""The covariance matrix feels like a correlation matrix. 
Are you sure it\'s correct?"""'], {}), '(\n "The covariance matrix feels like a correlation matrix. Are you sure it\'s correct?"\n )\n', (2582, 2677), False, 'import warnings\n'), ((5305, 5324), 'numpy.asarray', 'np.asarray', (['columns'], {}), '(columns)\n', (5315, 5324), True, 'import numpy as np\n'), ((5648, 5705), 'numpy.concatenate', 'np.concatenate', (['[agg[..., None], self[..., mask]]'], {'axis': '(2)'}), '([agg[..., None], self[..., mask]], axis=2)\n', (5662, 5705), True, 'import numpy as np\n'), ((7329, 7345), 'numpy.asarray', 'np.asarray', (['self'], {}), '(self)\n', (7339, 7345), True, 'import numpy as np\n'), ((8215, 8267), 'muarch.calibrate.calibrate_data', 'calibrate_data', (['self', 'mean', 'sd', 'self.time_unit', '(True)'], {}), '(self, mean, sd, self.time_unit, True)\n', (8229, 8267), False, 'from muarch.calibrate import calibrate_data\n'), ((8291, 8337), 'muarch.calibrate.calibrate_data', 'calibrate_data', (['self', 'mean', 'sd', 'self.time_unit'], {}), '(self, mean, sd, self.time_unit)\n', (8305, 8337), False, 'from muarch.calibrate import calibrate_data\n'), ((17566, 17586), 'pickle.dump', 'pickle.dump', (['self', 'f'], {}), '(self, f)\n', (17577, 17586), False, 'import pickle\n'), ((856, 872), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (866, 872), True, 'import numpy as np\n'), ((2293, 2307), 'numpy.cov', 'np.cov', (['a[i].T'], {}), '(a[i].T)\n', (2299, 2307), True, 'import numpy as np\n'), ((2498, 2514), 'numpy.diag', 'np.diag', (['cov_mat'], {}), '(cov_mat)\n', (2505, 2514), True, 'import numpy as np\n'), ((9676, 9690), 'copy.deepcopy', 'deepcopy', (['self'], {}), '(self)\n', (9684, 9690), False, 'from copy import deepcopy\n'), ((16040, 16081), 'muarch.funcs.get_annualized_mean', 'get_annualized_mean', (['self', 'self.time_unit'], {}), '(self, self.time_unit)\n', (16059, 16081), False, 'from muarch.funcs import get_annualized_kurtosis, get_annualized_mean, get_annualized_sd, get_annualized_skew\n'), ((16101, 16140), 
'muarch.funcs.get_annualized_sd', 'get_annualized_sd', (['self', 'self.time_unit'], {}), '(self, self.time_unit)\n', (16118, 16140), False, 'from muarch.funcs import get_annualized_kurtosis, get_annualized_mean, get_annualized_sd, get_annualized_skew\n'), ((16162, 16203), 'muarch.funcs.get_annualized_skew', 'get_annualized_skew', (['self', 'self.time_unit'], {}), '(self, self.time_unit)\n', (16181, 16203), False, 'from muarch.funcs import get_annualized_kurtosis, get_annualized_mean, get_annualized_sd, get_annualized_skew\n'), ((16225, 16270), 'muarch.funcs.get_annualized_kurtosis', 'get_annualized_kurtosis', (['self', 'self.time_unit'], {}), '(self, self.time_unit)\n', (16248, 16270), False, 'from muarch.funcs import get_annualized_kurtosis, get_annualized_mean, get_annualized_sd, get_annualized_skew\n'), ((20338, 20357), 'numpy.asarray', 'np.asarray', (['results'], {}), '(results)\n', (20348, 20357), True, 'import numpy as np\n'), ((2534, 2549), 'numpy.abs', 'np.abs', (['cov_mat'], {}), '(cov_mat)\n', (2540, 2549), True, 'import numpy as np\n'), ((12130, 12146), 'numpy.sign', 'np.sign', (['returns'], {}), '(returns)\n', (12137, 12146), True, 'import numpy as np\n'), ((12149, 12164), 'numpy.abs', 'np.abs', (['returns'], {}), '(returns)\n', (12155, 12164), True, 'import numpy as np\n'), ((19919, 19937), 'numpy.asarray', 'np.asarray', (['result'], {}), '(result)\n', (19929, 19937), True, 'import numpy as np\n')] |
# Summarize and visualize the Tarifierung_RI_2017 tariff dataset:
# prints LaTeX summary tables and saves marginal-distribution histograms.
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import os
import seaborn as sns
from global_vars import path_data
sns.set_style({'axes.grid' : False})
sns.set_context('paper')
pd.set_option("display.precision", 2)
#### load original data
data = pd.read_csv(os.path.join(path_data,r'Tarifierung_RI_2017.csv'), delimiter=';' )
N = len(data)
# summary of categorical features: level counts as fractions of the full dataset
f1_level, f1_counts = np.unique(data['ZahlweiseInkasso'], return_counts=True)
f2_level, f2_counts = np.unique(data['GeschlechtVP1'], return_counts=True)
f3_level, f3_counts = np.unique(data['RauchertypVP1'], return_counts=True)
df = pd.DataFrame(data = None)
# columns have different numbers of levels, so the shorter ones are padded with
# empty strings to keep the DataFrame rectangular
df['ZahlweiseInkasso'] = [f'{lvl} ({c/N: .2f} )' for lvl, c in zip(f1_level, f1_counts)]
df['GeschlechtVP1'] = [f'{lvl} ({c/N: .2f} )' for lvl, c in zip(f2_level, f2_counts)]+['', '']
df['RauchertypVP1'] = [f'{lvl} ({c/N: .2f} )' for lvl, c in zip(f3_level, f3_counts)]+['', '']
print(data.describe(include=None).to_latex())
print(df.to_latex())
data['Beginnjahr'] = data['Beginnjahr'] .astype('int')
# rename German columns to short English labels for plotting
data.columns = ['year', 'month', 'payment', 'gender', 'smoker', 'age', 'n', 't', 'sum insured', 'premium']
# set some plotting parameters globally
# parameters = {'axes.labelsize': 16, 'xtick.labelsize':14, 'ytick.labelsize': 14, 'legend.fontsize': 14, 'axes.titlesize': 16, 'figure.titlesize': 18}
# plt.rcParams.update(parameters)
# 3x3 grid; the last two axes are removed since only 7 features are plotted
_, ax = plt.subplots(3,3)
ax = ax.flatten()
ax[-1].remove()
ax[-2].remove()
for k, e in enumerate(['year', 'month', 'age', 'n', 't', 'sum insured', 'premium']):
    # weights 1/N turn the histogram counts into relative frequencies
    ax[k].hist(data[e], bins = 100, weights = np.zeros(N) + 1. / N, color = 'gray')#, font = 'large')
    ax[k].set_xlabel(e)
    if e == 'year':
        ax[k].set_xticks([2015, 2016])
    if e == 'sum insured':
        ax[k].set_xlabel('S')
        ax[k].set_xticks([0,5e5,1e6])
# data.hist(bins=100, grid = False, weights=np.zeros(N) + 1. / N, color = 'gray')
plt.tight_layout()
plt.savefig(os.path.join(path_data,r'data_marginal_dists.eps'))
plt.savefig(os.path.join(path_data,r'data_marginal_dists.png'), dpi=400)
plt.close()
# print(data.corr('pearson'))
# print(data.corr('pearson').to_latex())
# sns.heatmap(data.corr('pearson'))
# plt.show()
# plt.show() | [
"numpy.unique",
"seaborn.set_context",
"os.path.join",
"pandas.set_option",
"seaborn.set_style",
"matplotlib.pyplot.close",
"numpy.zeros",
"matplotlib.pyplot.tight_layout",
"pandas.DataFrame",
"matplotlib.pyplot.subplots"
] | [((138, 173), 'seaborn.set_style', 'sns.set_style', (["{'axes.grid': False}"], {}), "({'axes.grid': False})\n", (151, 173), True, 'import seaborn as sns\n'), ((175, 199), 'seaborn.set_context', 'sns.set_context', (['"""paper"""'], {}), "('paper')\n", (190, 199), True, 'import seaborn as sns\n'), ((200, 237), 'pandas.set_option', 'pd.set_option', (['"""display.precision"""', '(2)'], {}), "('display.precision', 2)\n", (213, 237), True, 'import pandas as pd\n'), ((424, 479), 'numpy.unique', 'np.unique', (["data['ZahlweiseInkasso']"], {'return_counts': '(True)'}), "(data['ZahlweiseInkasso'], return_counts=True)\n", (433, 479), True, 'import numpy as np\n'), ((502, 554), 'numpy.unique', 'np.unique', (["data['GeschlechtVP1']"], {'return_counts': '(True)'}), "(data['GeschlechtVP1'], return_counts=True)\n", (511, 554), True, 'import numpy as np\n'), ((577, 629), 'numpy.unique', 'np.unique', (["data['RauchertypVP1']"], {'return_counts': '(True)'}), "(data['RauchertypVP1'], return_counts=True)\n", (586, 629), True, 'import numpy as np\n'), ((636, 659), 'pandas.DataFrame', 'pd.DataFrame', ([], {'data': 'None'}), '(data=None)\n', (648, 659), True, 'import pandas as pd\n'), ((1410, 1428), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(3)'], {}), '(3, 3)\n', (1422, 1428), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1944), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (1942, 1944), True, 'import matplotlib.pyplot as plt\n'), ((2082, 2093), 'matplotlib.pyplot.close', 'plt.close', ([], {}), '()\n', (2091, 2093), True, 'import matplotlib.pyplot as plt\n'), ((283, 333), 'os.path.join', 'os.path.join', (['path_data', '"""Tarifierung_RI_2017.csv"""'], {}), "(path_data, 'Tarifierung_RI_2017.csv')\n", (295, 333), False, 'import os\n'), ((1957, 2007), 'os.path.join', 'os.path.join', (['path_data', '"""data_marginal_dists.eps"""'], {}), "(path_data, 'data_marginal_dists.eps')\n", (1969, 2007), False, 'import os\n'), ((2021, 2071), 
'os.path.join', 'os.path.join', (['path_data', '"""data_marginal_dists.png"""'], {}), "(path_data, 'data_marginal_dists.png')\n", (2033, 2071), False, 'import os\n'), ((1609, 1620), 'numpy.zeros', 'np.zeros', (['N'], {}), '(N)\n', (1617, 1620), True, 'import numpy as np\n')] |
import numpy as np
from astropy.nddata import CCDData
import ccdproc as ccdp
from pathlib import Path
import matplotlib.pyplot as plt
import matplotlib.colors as clr
from scipy.optimize import curve_fit as cft
import utils as utl
def flux_extraction(file_name, file_err_name, path, path_err, out_path, images=True):
    """
    Optimal (Horne-style) extraction of a 1-D flux spectrum from a trimmed
    2-D CCD frame, including slit-edge detection, a Gaussian spatial
    profile fit, and cosmic-ray rejection.

    Parameters
    ----------
    file_name : str
        Name of the image/telluric file
        from which flux has to be extracted
    file_err_name: str
        Name of the image/telluric variance file
    path : str
        Path of the desired image file
    path_err : str
        Path of the variance of desired image file
    out_path : str
        Path of the output data and/or image file
    images : bool
        True if one wants to save visualization of flux data
        False if not.
        Default is True
    ----------
    returns
    ----------
    flux : data file
        .dat file containing the flux at
        various pixel values
        Path of this file would be similar to
        that of image file.
    ----------
    """
    # Reading Data File
    # NOTE(review): CCDData (astropy.nddata) is used here but its import is
    # not visible in this excerpt -- it must be provided at module level.
    ccd = CCDData.read(path + file_name)# + '.fits')
    # Trimming the Image
    # Spatial axis is trimmed to columns 1:256, dispersion axis to rows 100:1000.
    trimmed = ccdp.trim_image(ccd, fits_section = '[1:256, 100:1000]')
    trimmed.meta['TRIM'] = True
    trimmed.header = ccd.header
    #trimmed.write(file_name + '_trim.fits')
    # Reading the data from Trimmed image
    data = trimmed.data
    # Reading Variance File
    ccd_err = CCDData.read(path_err + file_err_name)# + '.fits')
    # Trimming the Image
    trimmed_err = ccdp.trim_image(ccd_err, fits_section = '[1:256, 100:1000]')
    trimmed_err.meta['TRIM'] = True
    trimmed_err.header = ccd.header
    #trimmed.write(file_name + '_trim.fits')
    # Reading the data from Trimmed image
    data_err = trimmed_err.data
    # Replace zero variances by 1 to avoid division by zero in the
    # inverse-variance weights used below.
    data_err[data_err == 0] = 1
    # Creating a function to detect the edges of slit
    # For lower edge
    def xlow(raw_data):
        """
        Parameters
        ----------
        ----------
        raw_data : numpy.ndarray
            Array containing flux at some particular wavelength
        ----------
        returns
        ----------
        number : float
            A pixel number showing the lower edge of slit
        ----------
        """
        # Scan 5-pixel windows from the left; the first "flat" window
        # (std < 2 counts) is taken as the lower slit edge.
        # NOTE(review): if the flat window starts at pixel 0, xlw stays 0 and
        # the loop does not break; and if len(raw_data) < 5 the loop never
        # runs and xlw is unbound -- confirm inputs always avoid these cases.
        j = 0
        for i in range(int(len(raw_data)/5)):
            st = np.std(raw_data[j:j+5])
            xlw = 0
            if st < 2:
                xlw = j
            if xlw != 0:
                break
            j = j + 5
        return xlw
    # For upper edge
    def xup(raw_data):
        """
        Parameters
        ----------
        ----------
        raw_data : numpy.ndarray
            Array containing flux at some particular wavelength
        ----------
        returns
        ----------
        number : float
            A pixel number showing the upper edge of slit
        ----------
        """
        # Same window scan as xlow, but starting from the right-hand side.
        j = 255
        for i in range(int(len(raw_data)/5)):
            st = np.std(raw_data[j-5:j])
            xup = 0
            if st < 2:
                xup = j
            if xup != 0:
                break
            j = j - 5
        return xup
    # Creating xdata and ydata in range of ccd
    # xall: spatial pixel indices (0..255); yall: dispersion rows (0..900).
    xall = np.arange(0,256,1)
    yall = np.arange(0, 901, 1)
    # Detecting the edges of the spectrum
    # Sample the slit edges at five dispersion rows and fit straight lines
    # through them (edges are assumed to vary linearly with wavelength).
    ys = np.array([150, 300, 450, 600, 750])
    xs_left = np.array([])
    xs_right = np.array([])
    xs_mid = np.array([])
    for i in range(len(ys)):
        dd1 = data[ys[i]]
        xll = xlow(dd1)
        xs_left = np.hstack((xs_left, xll))
        xuu = xup(dd1)
        xs_right = np.hstack((xs_right, xuu))
    popt_l, pcov_l = cft(utl.line, xs_left, ys)
    popt_r, pcov_r = cft(utl.line, xs_right, ys)
    # Detecting a line where spectrum should reside
    # At each sample row, locate the peak between the fitted slit edges and
    # fit a line through the peak positions (the spectrum trace).
    for i in range(len(ys)):
        ran_l = utl.inv_line(ys[i], popt_l[0], popt_l[1])
        ran_r = utl.inv_line(ys[i], popt_r[0], popt_r[1])
        xd1 = data[ys[i]]
        xd = xd1[int(ran_l):int(ran_r)]
        ma = utl.special_maximum(xd)
        ab = np.where(xd == ma)
        xs_mid = np.hstack((xs_mid, ab[0][0] + ran_l))
    popt_m, pcov_m = cft(utl.line, xs_mid, ys)
    # Finding spatial profile
    def spatial(data1, data1_err, lam, xlim=25):
        """Fit a Gaussian to the spatial cut at dispersion row ``lam``.

        Returns (mu, sigma, fwhm) of the fitted profile, with ``mu`` in
        absolute spatial-pixel coordinates.  ``data1_err`` is currently
        unused here.
        """
        ydata = data1[lam]
        xmid = utl.inv_line(lam, popt_m[0], popt_m[1])
        # NOTE: these local names shadow the outer xlow/xup helper functions.
        xlow = xmid - xlim
        xup = xmid + xlim
        p2 = ydata[int(xlow):int(xup)]
        # Normalize the cut so it can be fitted as a probability profile.
        p1 = p2/np.sum(np.abs(p2))
        xdata = np.arange(1, len(p1)+1, 1)
        poptg, pcovg = cft(utl.gaus, xdata=xdata, ydata=p1, p0=[25,1])
        # FWHM = sigma * sqrt(ln(256)) = sigma * 2*sqrt(2*ln 2)
        fwhm = np.sqrt(poptg[1]*poptg[1]*np.log(256))
        # Shift the fitted center back into absolute pixel coordinates.
        mu1 = poptg[0] + utl.inv_line(lam, *popt_m) - xlim
        return mu1, poptg[1], fwhm
    # Finding total flux
    def total_flux(data1, data1_err, lam):
        """Inverse-variance weighted (optimal) flux at row ``lam``.

        Returns (fopt, var): the optimally extracted flux and its variance.
        """
        ydata = data1[lam]
        ydata_err = data1_err[lam]
        mu1, sig1, fm1 = spatial(data1, data1_err, lam)
        p_x = utl.gaus(xall, mu1, sig1)
        a1 = 0
        a2 = 0
        # fopt = sum(P*D/V) / sum(P^2/V)  (Horne 1986 style weighting)
        for i in range(len(xall)):
            a11 = p_x[i]*ydata[i]/ydata_err[i]
            a1 = a1 + a11
            a22 = p_x[i]*p_x[i]/ydata_err[i]
            a2 = a2 + a22
        fopt = a1/a2
        var = 1/a2
        return fopt, var
    # Cosmic Rays Removal
    def cosmic_ray(data1, data1_err, lam, threshold=16):
        """Replace pixels at row ``lam`` deviating from the model profile.

        A pixel is flagged when its squared residual against
        fopt * profile, scaled by the variance, exceeds ``threshold``;
        flagged pixels are replaced by the mean of their neighbors and
        their variance is set to 1.
        """
        fopt, var = total_flux(data1, data1_err, lam)
        d_s = data1[lam]
        d_s_err = data1_err[lam]
        mu2, sig2, fm2 = spatial(data1, data1_err, lam)
        p_x = utl.gaus(xall, mu2, sig2)
        data_wo_cr = np.array([])
        data_wo_cr_err = np.array([])
        for i in range(len(xall)):
            xx = (d_s[i] - fopt*p_x[i])**2
            yy = xx/d_s_err[i]
            if yy>threshold:
                # Edge pixel: only the left neighbor is available.
                if i == len(xall)-1:
                    xxx = data1[lam][i-1]
                else:
                    xxx = (data1[lam][i-1] + data1[lam][i+1])/2
                data_wo_cr = np.hstack((data_wo_cr, xxx))
                data_wo_cr_err = np.hstack((data_wo_cr_err, 1))
            else:
                data_wo_cr = np.hstack((data_wo_cr, data1[lam][i]))
                data_wo_cr_err = np.hstack((data_wo_cr_err, data1_err[lam][i]))
        return data_wo_cr, data_wo_cr_err
    # Data Without Cosmic Rays
    # Build the cleaned frame row by row (first row seeds the stack).
    final_data = np.array([])
    final_data_err = np.array([])
    final_data, final_data_err = cosmic_ray(data, data_err, yall[0], threshold=10)
    for i in range(len(yall)-1):
        fda, fdae = cosmic_ray(data, data_err, yall[i+1])
        final_data = np.vstack((final_data, fda))
        final_data_err = np.vstack((final_data_err, fdae))
    # Flux as a function of pixel
    flux = np.array([])
    flux_err = np.array([])
    for i in range(len(yall)):
        f11, v11 = total_flux(final_data, final_data_err, yall[i])
        flux = np.hstack((flux, f11))
        flux_err = np.hstack((flux_err, v11))
    # Saving the image file for flux
    if images == True:
        fig1 = plt.figure(figsize = (20,10))
        plt.errorbar(yall, flux, yerr=flux_err)
        plt.xlabel('Pixel Number')
        plt.ylabel('Total Flux')
        plt.title('Total flux for ' + file_name + ' observation')
        plt.grid()
        plt.savefig(out_path + '/' + file_name + '_flux.png')
        plt.close(fig1)
    # Saving Data file of the flux
    # NOTE(review): three columns (pixel, flux, flux_err) are written but the
    # header only names two -- confirm downstream readers expect this.
    f1 = open(out_path + '/' + file_name + '_flux.dat', 'w')
    f1.write('#Pixel\t\tFlux\n')
    for i in range(len(yall)):
        f1.write(str(yall[i]) + '\t\t' + str(flux[i]) + '\t' + str(flux_err[i]) + '\n')
f1.close() | [
"matplotlib.pyplot.grid",
"numpy.hstack",
"matplotlib.pyplot.ylabel",
"numpy.log",
"utils.inv_line",
"numpy.array",
"matplotlib.pyplot.errorbar",
"numpy.arange",
"numpy.where",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.close",
"numpy.vstack",
"ccdproc.trim_image",
"utils.special_maxim... | [((1166, 1196), 'astropy.nddata.CCDData.read', 'CCDData.read', (['(path + file_name)'], {}), '(path + file_name)\n', (1178, 1196), False, 'from astropy.nddata import CCDData\n'), ((1253, 1307), 'ccdproc.trim_image', 'ccdp.trim_image', (['ccd'], {'fits_section': '"""[1:256, 100:1000]"""'}), "(ccd, fits_section='[1:256, 100:1000]')\n", (1268, 1307), True, 'import ccdproc as ccdp\n'), ((1537, 1575), 'astropy.nddata.CCDData.read', 'CCDData.read', (['(path_err + file_err_name)'], {}), '(path_err + file_err_name)\n', (1549, 1575), False, 'from astropy.nddata import CCDData\n'), ((1636, 1694), 'ccdproc.trim_image', 'ccdp.trim_image', (['ccd_err'], {'fits_section': '"""[1:256, 100:1000]"""'}), "(ccd_err, fits_section='[1:256, 100:1000]')\n", (1651, 1694), True, 'import ccdproc as ccdp\n'), ((3325, 3345), 'numpy.arange', 'np.arange', (['(0)', '(256)', '(1)'], {}), '(0, 256, 1)\n', (3334, 3345), True, 'import numpy as np\n'), ((3355, 3375), 'numpy.arange', 'np.arange', (['(0)', '(901)', '(1)'], {}), '(0, 901, 1)\n', (3364, 3375), True, 'import numpy as np\n'), ((3432, 3467), 'numpy.array', 'np.array', (['[150, 300, 450, 600, 750]'], {}), '([150, 300, 450, 600, 750])\n', (3440, 3467), True, 'import numpy as np\n'), ((3482, 3494), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3490, 3494), True, 'import numpy as np\n'), ((3510, 3522), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3518, 3522), True, 'import numpy as np\n'), ((3536, 3548), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (3544, 3548), True, 'import numpy as np\n'), ((3767, 3793), 'scipy.optimize.curve_fit', 'cft', (['utl.line', 'xs_left', 'ys'], {}), '(utl.line, xs_left, ys)\n', (3770, 3793), True, 'from scipy.optimize import curve_fit as cft\n'), ((3815, 3842), 'scipy.optimize.curve_fit', 'cft', (['utl.line', 'xs_right', 'ys'], {}), '(utl.line, xs_right, ys)\n', (3818, 3842), True, 'from scipy.optimize import curve_fit as cft\n'), ((4257, 4282), 'scipy.optimize.curve_fit', 
'cft', (['utl.line', 'xs_mid', 'ys'], {}), '(utl.line, xs_mid, ys)\n', (4260, 4282), True, 'from scipy.optimize import curve_fit as cft\n'), ((6389, 6401), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6397, 6401), True, 'import numpy as np\n'), ((6423, 6435), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6431, 6435), True, 'import numpy as np\n'), ((6774, 6786), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6782, 6786), True, 'import numpy as np\n'), ((6802, 6814), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (6810, 6814), True, 'import numpy as np\n'), ((3646, 3671), 'numpy.hstack', 'np.hstack', (['(xs_left, xll)'], {}), '((xs_left, xll))\n', (3655, 3671), True, 'import numpy as np\n'), ((3714, 3740), 'numpy.hstack', 'np.hstack', (['(xs_right, xuu)'], {}), '((xs_right, xuu))\n', (3723, 3740), True, 'import numpy as np\n'), ((3941, 3982), 'utils.inv_line', 'utl.inv_line', (['ys[i]', 'popt_l[0]', 'popt_l[1]'], {}), '(ys[i], popt_l[0], popt_l[1])\n', (3953, 3982), True, 'import utils as utl\n'), ((3999, 4040), 'utils.inv_line', 'utl.inv_line', (['ys[i]', 'popt_r[0]', 'popt_r[1]'], {}), '(ys[i], popt_r[0], popt_r[1])\n', (4011, 4040), True, 'import utils as utl\n'), ((4120, 4143), 'utils.special_maximum', 'utl.special_maximum', (['xd'], {}), '(xd)\n', (4139, 4143), True, 'import utils as utl\n'), ((4157, 4175), 'numpy.where', 'np.where', (['(xd == ma)'], {}), '(xd == ma)\n', (4165, 4175), True, 'import numpy as np\n'), ((4193, 4230), 'numpy.hstack', 'np.hstack', (['(xs_mid, ab[0][0] + ran_l)'], {}), '((xs_mid, ab[0][0] + ran_l))\n', (4202, 4230), True, 'import numpy as np\n'), ((4409, 4448), 'utils.inv_line', 'utl.inv_line', (['lam', 'popt_m[0]', 'popt_m[1]'], {}), '(lam, popt_m[0], popt_m[1])\n', (4421, 4448), True, 'import utils as utl\n'), ((4642, 4690), 'scipy.optimize.curve_fit', 'cft', (['utl.gaus'], {'xdata': 'xdata', 'ydata': 'p1', 'p0': '[25, 1]'}), '(utl.gaus, xdata=xdata, ydata=p1, p0=[25, 1])\n', (4645, 4690), True, 'from 
scipy.optimize import curve_fit as cft\n'), ((5043, 5068), 'utils.gaus', 'utl.gaus', (['xall', 'mu1', 'sig1'], {}), '(xall, mu1, sig1)\n', (5051, 5068), True, 'import utils as utl\n'), ((5609, 5634), 'utils.gaus', 'utl.gaus', (['xall', 'mu2', 'sig2'], {}), '(xall, mu2, sig2)\n', (5617, 5634), True, 'import utils as utl\n'), ((5656, 5668), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5664, 5668), True, 'import numpy as np\n'), ((5694, 5706), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (5702, 5706), True, 'import numpy as np\n'), ((6636, 6664), 'numpy.vstack', 'np.vstack', (['(final_data, fda)'], {}), '((final_data, fda))\n', (6645, 6664), True, 'import numpy as np\n'), ((6690, 6723), 'numpy.vstack', 'np.vstack', (['(final_data_err, fdae)'], {}), '((final_data_err, fdae))\n', (6699, 6723), True, 'import numpy as np\n'), ((6928, 6950), 'numpy.hstack', 'np.hstack', (['(flux, f11)'], {}), '((flux, f11))\n', (6937, 6950), True, 'import numpy as np\n'), ((6970, 6996), 'numpy.hstack', 'np.hstack', (['(flux_err, v11)'], {}), '((flux_err, v11))\n', (6979, 6996), True, 'import numpy as np\n'), ((7073, 7101), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(20, 10)'}), '(figsize=(20, 10))\n', (7083, 7101), True, 'import matplotlib.pyplot as plt\n'), ((7111, 7150), 'matplotlib.pyplot.errorbar', 'plt.errorbar', (['yall', 'flux'], {'yerr': 'flux_err'}), '(yall, flux, yerr=flux_err)\n', (7123, 7150), True, 'import matplotlib.pyplot as plt\n'), ((7159, 7185), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Pixel Number"""'], {}), "('Pixel Number')\n", (7169, 7185), True, 'import matplotlib.pyplot as plt\n'), ((7194, 7218), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Total Flux"""'], {}), "('Total Flux')\n", (7204, 7218), True, 'import matplotlib.pyplot as plt\n'), ((7227, 7284), 'matplotlib.pyplot.title', 'plt.title', (["('Total flux for ' + file_name + ' observation')"], {}), "('Total flux for ' + file_name + ' observation')\n", (7236, 7284), True, 
'import matplotlib.pyplot as plt\n'), ((7293, 7303), 'matplotlib.pyplot.grid', 'plt.grid', ([], {}), '()\n', (7301, 7303), True, 'import matplotlib.pyplot as plt\n'), ((7312, 7365), 'matplotlib.pyplot.savefig', 'plt.savefig', (["(out_path + '/' + file_name + '_flux.png')"], {}), "(out_path + '/' + file_name + '_flux.png')\n", (7323, 7365), True, 'import matplotlib.pyplot as plt\n'), ((7374, 7389), 'matplotlib.pyplot.close', 'plt.close', (['fig1'], {}), '(fig1)\n', (7383, 7389), True, 'import matplotlib.pyplot as plt\n'), ((2441, 2466), 'numpy.std', 'np.std', (['raw_data[j:j + 5]'], {}), '(raw_data[j:j + 5])\n', (2447, 2466), True, 'import numpy as np\n'), ((3083, 3108), 'numpy.std', 'np.std', (['raw_data[j - 5:j]'], {}), '(raw_data[j - 5:j])\n', (3089, 3108), True, 'import numpy as np\n'), ((4564, 4574), 'numpy.abs', 'np.abs', (['p2'], {}), '(p2)\n', (4570, 4574), True, 'import numpy as np\n'), ((4731, 4742), 'numpy.log', 'np.log', (['(256)'], {}), '(256)\n', (4737, 4742), True, 'import numpy as np\n'), ((4769, 4795), 'utils.inv_line', 'utl.inv_line', (['lam', '*popt_m'], {}), '(lam, *popt_m)\n', (4781, 4795), True, 'import utils as utl\n'), ((6039, 6067), 'numpy.hstack', 'np.hstack', (['(data_wo_cr, xxx)'], {}), '((data_wo_cr, xxx))\n', (6048, 6067), True, 'import numpy as np\n'), ((6101, 6131), 'numpy.hstack', 'np.hstack', (['(data_wo_cr_err, 1)'], {}), '((data_wo_cr_err, 1))\n', (6110, 6131), True, 'import numpy as np\n'), ((6179, 6217), 'numpy.hstack', 'np.hstack', (['(data_wo_cr, data1[lam][i])'], {}), '((data_wo_cr, data1[lam][i]))\n', (6188, 6217), True, 'import numpy as np\n'), ((6251, 6297), 'numpy.hstack', 'np.hstack', (['(data_wo_cr_err, data1_err[lam][i])'], {}), '((data_wo_cr_err, data1_err[lam][i]))\n', (6260, 6297), True, 'import numpy as np\n')] |
import numpy as np
from wtm_envs.mujoco import robot_env, utils
import mujoco_py
from queue import deque
from mujoco_py import modder
import matplotlib.pyplot as plt
from matplotlib.backends.backend_agg import FigureCanvasAgg
import platform
import os
def goal_distance(goal_a, goal_b):
    """Distance between two goal vectors.

    When the last dimension is a multiple of 3, each consecutive (x, y, z)
    triplet is treated as one sub-goal and the *largest* per-triplet
    Euclidean distance is returned; otherwise the plain Euclidean norm of
    the difference is returned.
    """
    assert goal_a.shape == goal_b.shape
    flat_dist = np.linalg.norm(goal_a - goal_b, axis=-1)
    if goal_a.shape[-1] % 3 != 0:
        # Not triplet-structured: fall back to the overall norm.
        return flat_dist
    n_triplets = goal_a.shape[-1] // 3
    worst = np.zeros(flat_dist.shape)
    for idx in range(n_triplets):
        chunk = slice(3 * idx, 3 * idx + 3)
        d = np.asarray(np.linalg.norm(goal_a[..., chunk] - goal_b[..., chunk], axis=-1))
        if worst.shape == ():
            # Scalar case (single goal vector): keep the running maximum.
            worst = np.max([float(d), float(worst)])
        else:
            # Batched case: element-wise running maximum.
            worst = np.max([d, worst], axis=0)
    return worst
class PercDeque(deque):
    """Bounded deque that caches the 25th/75th percentiles of its contents.

    The percentiles are recomputed (along axis 0) every ``perc_recomp``-th
    append, so readers of ``upper_perc``/``lower_perc`` see a slightly
    stale but cheap-to-maintain estimate.
    """

    def __init__(self, maxlen, perc_recomp=100):
        super(PercDeque, self).__init__(maxlen=maxlen)
        self.ctr = 0                    # appends since last counter reset
        self.upper_perc = None          # cached 75th percentile per component
        self.lower_perc = None          # cached 25th percentile per component
        self.perc_recomp = perc_recomp  # recompute interval (in appends)

    def append(self, vec):
        """Append ``vec`` and periodically refresh the cached percentiles."""
        super(PercDeque, self).append(vec)
        if self.ctr >= self.maxlen:
            self.ctr = 0
        if self.ctr % self.perc_recomp == 0:
            snapshot = np.asarray(self)
            self.upper_perc = np.percentile(snapshot, 75, axis=0)
            self.lower_perc = np.percentile(snapshot, 25, axis=0)
        self.ctr += 1
class WTMEnv(robot_env.RobotEnv):
    # Shared base environment for the MuJoCo manipulation tasks in this
    # package: goal bookkeeping, action application, observation noise,
    # rendering, and an on-texture debug graph.

    # NOTE(review): the docstring below lists arguments of the concrete task
    # environments; this base __init__ only accepts model_path, n_substeps,
    # initial_qpos and n_actions.
    def __init__(
        self, model_path, n_substeps, initial_qpos, n_actions=4
    ):
        """Initializes a new Fetch environment.
        Args:
            model_path (string): path to the environments XML file
            n_substeps (int): number of substeps the simulation runs on every call to step
            gripper_extra_height (float): additional height above the table when positioning the gripper
            block_gripper (boolean): whether or not the gripper is blocked (i.e. not movable) or not
            target_in_the_air (boolean): whether or not the target should be in the air above the table or on the table surface
            target_offset (float or array with 3 elements): offset of the target
            obj_range (float): range of a uniform distribution for sampling initial object positions
            target_range (float): range of a uniform distribution for sampling a target
            distance_threshold (float): the threshold after which a goal is considered achieved
            initial_qpos (dict): a dictionary of joint names and values that define the initial configuration
            reward_type ('sparse' or 'dense'): the reward type, i.e. sparse or dense
            gripper_goal ('gripper_none', 'gripper_above', 'gripper_random'): the gripper's goal location
            n_objects (int): no of objects in the environment. If none, then no_of_objects=0
            min_tower_height (int): the minimum height of the tower.
            max_tower_height (int): the maximum height of the tower.
        """
        self._viewers = {}
        # Rolling history of observations; its percentiles scale the
        # observation noise in add_noise().
        self.obs_history = PercDeque(maxlen=5000)
        self.obs_noise_coefficient = 0.0
        self.plan_cache = {}
        self.goal_hierarchy = {}
        self.goal = []
        self.final_goal = []
        # axis_name -> list of values (plus axis_name+'_x' -> x positions)
        # plotted by create_graph().
        self.graph_values = {}
        super(WTMEnv, self).__init__(
            model_path=model_path, n_substeps=n_substeps, n_actions=n_actions,
            initial_qpos=initial_qpos)
        # Texture modder used to paint the debug graph onto "graph_geom".
        self.mod = modder.TextureModder(self.sim)
        # assert self.gripper_goal in ['gripper_above', 'gripper_random'], "gripper_none is not supported anymore"
    # GoalEnv methods
    # ----------------------------
    def compute_reward(self, achieved_goal, goal, info):
        # Sparse: 0 on success, -1 otherwise; dense: negative goal distance.
        # NOTE(review): self.reward_type is expected to be set by a subclass.
        if self.reward_type == 'sparse':
            success = self._is_success(achieved_goal, goal)
            return (success - 1).astype(np.float32)
        else:
            d = goal_distance(achieved_goal, goal)
            return -d
    # RobotEnv methods
    # ----------------------------
    def _step_callback(self):
        # Keep the gripper fingers closed when the task blocks the gripper.
        if "block_gripper" in self.__dict__.keys():
            if self.block_gripper:
                self.sim.data.set_joint_qpos('robot0:l_gripper_finger_joint', 0.)
                self.sim.data.set_joint_qpos('robot0:r_gripper_finger_joint', 0.)
                self.sim.forward()
    def _goal2obs(self, goal):
        """Embed goal vector(s) into zero-padded observation-sized vectors.

        Accepts a single goal (1-D) or a batch (2-D) and returns the same
        arity.  The goal occupies the first ``self.goal_size`` components;
        the remainder is zero.
        """
        if len(goal.shape) == 1:
            goal_arr = np.array([goal])
        else:
            goal_arr = goal
        assert len(goal_arr.shape) == 2
        obs = []
        o_dims = self.observation_space.spaces['observation'].shape[0]
        o = np.zeros(o_dims, np.float32)
        for g in goal_arr:
            o[:self.goal_size] = g
            obs.append(o.copy())
        obs = np.array(obs)
        if len(goal.shape) == 1:
            return obs[0]
        else:
            return obs
    def _set_action(self, action):
        """Apply a 4-D action: 3 Cartesian deltas + 1 gripper command."""
        assert action.shape == (4,)
        action = action.copy()  # ensure that we don't change the action outside of this scope
        pos_ctrl, gripper_ctrl = action[:3], action[3]
        pos_ctrl *= 0.05  # limit maximum change in position
        rot_ctrl = [1., 0., 1., 0.]  # fixed rotation of the end effector, expressed as a quaternion
        gripper_ctrl = np.array([gripper_ctrl, gripper_ctrl])
        assert gripper_ctrl.shape == (2,)
        if self.block_gripper:
            gripper_ctrl = np.zeros_like(gripper_ctrl)
        action = np.concatenate([pos_ctrl, rot_ctrl, gripper_ctrl])
        # Apply action to simulation.
        utils.ctrl_set_action(self.sim, action)
        utils.mocap_set_action(self.sim, action)
        # NOTE(review): self.step_ctr is expected to be initialized elsewhere
        # (e.g. in a subclass reset) -- confirm.
        self.step_ctr += 1
    def add_noise(self, vec, history, noise_coeff):
        """Return ``vec`` plus Gaussian noise scaled by the historical IQR.

        The interquartile range comes from the PercDeque ``history``; the
        input is also appended to the history.
        """
        history.append(vec)
        # NOTE: 'range' shadows the builtin here (left unchanged).
        range = history.upper_perc - history.lower_perc
        coeff_range = noise_coeff * range
        noise = np.random.normal(loc=np.zeros_like(coeff_range), scale=coeff_range)
        vec = vec.copy() + noise
        return vec
    def _get_viewer(self, mode='human'):
        # Lazily create and cache one viewer per render mode.
        viewer = self._viewers.get(mode)
        if viewer is None:
            if mode == 'human':
                viewer = mujoco_py.MjViewer(self.sim)
            elif mode == 'rgb_array' or mode == 'depth_array':
                viewer = mujoco_py.MjViewer(self.sim)
                # The following should work but it does not. Therefore, replaced by human rendering (with MjViewer, the line above) now.
                # viewer = mujoco_py.MjRenderContextOffscreen(self.sim, -1)
                # viewer = mujoco_py.MjRenderContext(self.sim, -1)
            self._viewers[mode] = viewer
            self._viewer_setup(mode=mode)
        return self._viewers[mode]
    def _viewer_setup(self, mode='human'):
        # Point the camera at the gripper link; human mode uses a wider,
        # angled view, rgb_array a closer straight-on one.
        if mode == 'human':
            body_id = self.sim.model.body_name2id('robot0:gripper_link')
            lookat = self.sim.data.body_xpos[body_id]
            for idx, value in enumerate(lookat):
                self._viewers[mode].cam.lookat[idx] = value
            self._viewers[mode].cam.distance = 2.5
            self._viewers[mode].cam.azimuth = 132.
            self._viewers[mode].cam.elevation = -14.
        elif mode == 'rgb_array':
            body_id = self.sim.model.body_name2id('robot0:gripper_link')
            lookat = self.sim.data.body_xpos[body_id]
            for idx, value in enumerate(lookat):
                self._viewers[mode].cam.lookat[idx] = value
            self._viewers[mode].cam.distance = 1.
            self._viewers[mode].cam.azimuth = 180.
            self._viewers[mode].cam.elevation = -40.
    def _is_success(self, achieved_goal, desired_goal):
        # Success when the (max per-triplet) goal distance is below threshold.
        d = goal_distance(achieved_goal, desired_goal)
        return (d < self.distance_threshold).astype(np.float32)
    def render(self, mode='human'):
        self._render_callback()
        if mode == 'rgb_array':
            self._get_viewer(mode).render()
            # window size used for old mujoco-py:
            width, height = 1920, 1180
            data = self._get_viewer(mode).read_pixels(width, height, depth=False)
            # original image is upside-down, so flip it
            return data[::-1, :, :]
        elif mode == 'human':
            self._get_viewer().render()
            if bool(self.graph_values):
                body_names = [self.sim.model.body_id2name(x) for x in np.arange(self.sim.model.nbody)]
                if 'graph_body' in body_names: # check if canvas in XML
                    self._get_viewer().vopt.geomgroup[3] = 1 # make canvas visible
                    self.mod.set_rgb("graph_geom", self.create_graph())
    def create_graph(self):
        """Render self.graph_values into an RGB array (for the canvas geom)."""
        # create Graph
        fig = plt.figure(figsize=(6.4, 6.4))
        canvas = FigureCanvasAgg(fig)
        # Every tracked series has a companion '<name>_x' key; plot only the
        # value series, each on its own overlaid axes.
        keys = self.graph_values.keys()
        keys = filter(lambda x: x[-2:] != '_x', keys)
        for i, key in enumerate(keys):
            frame_on = i==0
            ax = fig.add_subplot(111, label=str(i), frame_on=frame_on)
            ax.set_ylabel(str(key), color="C"+str(i))
            ax.set_xlabel('step', color="C"+str(i))
            if i % 2 != 0:
                # Alternate axes to the right/top so labels don't overlap.
                ax.yaxis.tick_right()
                ax.yaxis.set_label_position('right')
                ax.xaxis.tick_top()
                ax.xaxis.set_label_position('top')
            ax.tick_params(axis='y', colors="C"+str(i))
            ax.plot(self.graph_values[key+'_x'], self.graph_values[key], color="C"+str(i))
        plt.tight_layout()
        canvas.draw()
        buf = canvas.tostring_rgb()
        ncols, nrows = fig.canvas.get_width_height()
        plt.close(fig)
        # NOTE(review): np.fromstring is deprecated (np.frombuffer is the
        # modern equivalent); also canvas.tostring_rgb was removed in newer
        # matplotlib releases -- confirm pinned versions.
        return np.fromstring(buf, dtype=np.uint8).reshape(nrows, ncols, 3)
    def add_graph_values(self, axis_name, val, x, reset=False):
        """Record (x, val) for the series ``axis_name`` (optionally resetting it)."""
        if reset and axis_name in self.graph_values.keys():
            del self.graph_values[axis_name]
            del self.graph_values[axis_name+'_x']
        if axis_name in self.graph_values:
            self.graph_values[axis_name].append(val)
            self.graph_values[axis_name+'_x'].append(x)
        else:
            # NOTE(review): first insert stores val[0] while later appends
            # store val itself -- looks inconsistent; confirm callers always
            # pass a length-1 sequence.
            self.graph_values[axis_name]=[val[0]]
self.graph_values[axis_name+'_x'] = [x] | [
"mujoco_py.MjViewer",
"mujoco_py.modder.TextureModder",
"numpy.max",
"matplotlib.pyplot.close",
"numpy.array",
"numpy.zeros",
"wtm_envs.mujoco.utils.mocap_set_action",
"matplotlib.pyplot.figure",
"numpy.percentile",
"numpy.concatenate",
"numpy.linalg.norm",
"matplotlib.backends.backend_agg.Fig... | [((344, 384), 'numpy.linalg.norm', 'np.linalg.norm', (['(goal_a - goal_b)'], {'axis': '(-1)'}), '(goal_a - goal_b, axis=-1)\n', (358, 384), True, 'import numpy as np\n'), ((480, 505), 'numpy.zeros', 'np.zeros', (['norm_dist.shape'], {}), '(norm_dist.shape)\n', (488, 505), True, 'import numpy as np\n'), ((3649, 3679), 'mujoco_py.modder.TextureModder', 'modder.TextureModder', (['self.sim'], {}), '(self.sim)\n', (3669, 3679), False, 'from mujoco_py import modder\n'), ((4813, 4841), 'numpy.zeros', 'np.zeros', (['o_dims', 'np.float32'], {}), '(o_dims, np.float32)\n', (4821, 4841), True, 'import numpy as np\n'), ((4951, 4964), 'numpy.array', 'np.array', (['obs'], {}), '(obs)\n', (4959, 4964), True, 'import numpy as np\n'), ((5469, 5507), 'numpy.array', 'np.array', (['[gripper_ctrl, gripper_ctrl]'], {}), '([gripper_ctrl, gripper_ctrl])\n', (5477, 5507), True, 'import numpy as np\n'), ((5653, 5703), 'numpy.concatenate', 'np.concatenate', (['[pos_ctrl, rot_ctrl, gripper_ctrl]'], {}), '([pos_ctrl, rot_ctrl, gripper_ctrl])\n', (5667, 5703), True, 'import numpy as np\n'), ((5751, 5790), 'wtm_envs.mujoco.utils.ctrl_set_action', 'utils.ctrl_set_action', (['self.sim', 'action'], {}), '(self.sim, action)\n', (5772, 5790), False, 'from wtm_envs.mujoco import robot_env, utils\n'), ((5799, 5839), 'wtm_envs.mujoco.utils.mocap_set_action', 'utils.mocap_set_action', (['self.sim', 'action'], {}), '(self.sim, action)\n', (5821, 5839), False, 'from wtm_envs.mujoco import robot_env, utils\n'), ((8853, 8883), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6.4, 6.4)'}), '(figsize=(6.4, 6.4))\n', (8863, 8883), True, 'import matplotlib.pyplot as plt\n'), ((8901, 8921), 'matplotlib.backends.backend_agg.FigureCanvasAgg', 'FigureCanvasAgg', (['fig'], {}), '(fig)\n', (8916, 8921), False, 'from matplotlib.backends.backend_agg import FigureCanvasAgg\n'), ((9620, 9638), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', 
(9636, 9638), True, 'import matplotlib.pyplot as plt\n'), ((9759, 9773), 'matplotlib.pyplot.close', 'plt.close', (['fig'], {}), '(fig)\n', (9768, 9773), True, 'import matplotlib.pyplot as plt\n'), ((1447, 1461), 'numpy.array', 'np.array', (['self'], {}), '(self)\n', (1455, 1461), True, 'import numpy as np\n'), ((1492, 1527), 'numpy.percentile', 'np.percentile', (['hist_vec', '(75)'], {'axis': '(0)'}), '(hist_vec, 75, axis=0)\n', (1505, 1527), True, 'import numpy as np\n'), ((1558, 1593), 'numpy.percentile', 'np.percentile', (['hist_vec', '(25)'], {'axis': '(0)'}), '(hist_vec, 25, axis=0)\n', (1571, 1593), True, 'import numpy as np\n'), ((4614, 4630), 'numpy.array', 'np.array', (['[goal]'], {}), '([goal])\n', (4622, 4630), True, 'import numpy as np\n'), ((5608, 5635), 'numpy.zeros_like', 'np.zeros_like', (['gripper_ctrl'], {}), '(gripper_ctrl)\n', (5621, 5635), True, 'import numpy as np\n'), ((709, 749), 'numpy.linalg.norm', 'np.linalg.norm', (['(subg_a - subg_b)'], {'axis': '(-1)'}), '(subg_a - subg_b, axis=-1)\n', (723, 749), True, 'import numpy as np\n'), ((903, 935), 'numpy.max', 'np.max', (['[dist, max_dist]'], {'axis': '(0)'}), '([dist, max_dist], axis=0)\n', (909, 935), True, 'import numpy as np\n'), ((6083, 6109), 'numpy.zeros_like', 'np.zeros_like', (['coeff_range'], {}), '(coeff_range)\n', (6096, 6109), True, 'import numpy as np\n'), ((6349, 6377), 'mujoco_py.MjViewer', 'mujoco_py.MjViewer', (['self.sim'], {}), '(self.sim)\n', (6367, 6377), False, 'import mujoco_py\n'), ((9789, 9823), 'numpy.fromstring', 'np.fromstring', (['buf'], {'dtype': 'np.uint8'}), '(buf, dtype=np.uint8)\n', (9802, 9823), True, 'import numpy as np\n'), ((6466, 6494), 'mujoco_py.MjViewer', 'mujoco_py.MjViewer', (['self.sim'], {}), '(self.sim)\n', (6484, 6494), False, 'import mujoco_py\n'), ((8537, 8568), 'numpy.arange', 'np.arange', (['self.sim.model.nbody'], {}), '(self.sim.model.nbody)\n', (8546, 8568), True, 'import numpy as np\n')] |
"""Generate a synthetic population of students and print a small sample."""
from generator.factory import Factory
from generator.strategy import DemoStrategy
from generator.constants import DEMO
import numpy as np

# --- Configuration -------------------------------------------------------
n = 100000          # size of the generated student population
pov_cost = 0.05     # mean score drop for a student living in poverty
ell_cost = 0.05     # mean drop for English learners in reading and english
dis_cost = 0.05     # max possible mean change for a student with a disability

# --- Build the population and report its demographics --------------------
strat = DemoStrategy(pov_cost, ell_cost, dis_cost)
stud_fact = Factory(n, DEMO, strat)
stud_fact.print_demos()

# --- Inspect a random sample of ten students -----------------------------
does = np.random.choice(stud_fact.student_population, 10, replace=False)
print("\nA Student Sample")
for student in does:
    student.pretty_print()
| [
"generator.strategy.DemoStrategy",
"generator.factory.Factory",
"numpy.random.choice"
] | [((449, 491), 'generator.strategy.DemoStrategy', 'DemoStrategy', (['pov_cost', 'ell_cost', 'dis_cost'], {}), '(pov_cost, ell_cost, dis_cost)\n', (461, 491), False, 'from generator.strategy import DemoStrategy\n'), ((504, 527), 'generator.factory.Factory', 'Factory', (['n', 'DEMO', 'strat'], {}), '(n, DEMO, strat)\n', (511, 527), False, 'from generator.factory import Factory\n'), ((585, 650), 'numpy.random.choice', 'np.random.choice', (['stud_fact.student_population', '(10)'], {'replace': '(False)'}), '(stud_fact.student_population, 10, replace=False)\n', (601, 650), True, 'import numpy as np\n')] |
import warnings
import numpy as np
from scipy import linalg
class Model(object):
    """Base class for a model. Actual models should inherit from this class.

    In this class the functions that should be implemented by each model
    are defined.

    Attributes
    ----------
    name : str
        Name of the model.
    status : int
        Indicates the status of the model:
        -1 : Instance created. Not filled with values yet.
        0 : Filled with values
        1 : Filled with values and x0 set (optional level).
    """
    name = 'Model'
    status_need_for_eval = 0

    # Fixed messages for the repeated status checks.
    # Bug fixes vs. the previous version: 'intilized' typo corrected and a
    # missing space added (the old concatenation rendered as
    # "... x0 has to beset. Run ...").
    _MSG_NEED_INIT = ("Model has to be initialized. "
                      "Run 'model.initialize' first!")
    _MSG_NEED_X0 = ("Model has to be initialized and x0 has to be "
                    "set. Run 'model.initialize' and 'model.set_x0' first!")

    def __init__(self):
        self.status = -1
        self.has_background = False

    def _assert_evaluable(self):
        """Raise RuntimeError unless the model status allows evaluation.

        The required status depends on the class attribute
        ``status_need_for_eval`` (0: initialized is enough, 1: x0 must be
        set as well).
        """
        if self.status < 0 and self.status_need_for_eval == 0:
            raise RuntimeError(self._MSG_NEED_INIT)
        if self.status < 1 and self.status_need_for_eval == 1:
            raise RuntimeError(self._MSG_NEED_X0)

    def initialize(self):
        """Fill the model with all needed values.

        Subclasses should call this with their actual data; it advances the
        status to 0.
        """
        self.status = 0

    def evaluate(self):
        """Evaluate the model.

        Actual implementations of this function should return:
        vec_g : Observable vector
        vec_f : Solution vector
        vec_f_reg : Vector used in the regularization

        Raises
        ------
        RuntimeError
            If the model status is below what ``status_need_for_eval``
            requires.
        """
        self._assert_evaluable()

    def set_model_x0(self):
        """Set up x0 for models that need it (``status_need_for_eval == 1``).

        Advances the status to 1.

        Raises
        ------
        RuntimeError
            If the model has not been initialized yet.
        """
        if self.status < 0:
            raise RuntimeError("Model has to be initialized, before setting "
                               "x0. Run 'model.initialize' first!")
        self.status = 1

    def generate_fit_x0(self):
        """Return reasonable starting values for the fitter.

        Raises
        ------
        RuntimeError
            If the model status is below what ``status_need_for_eval``
            requires.
        """
        self._assert_evaluable()

    def generate_fit_bounds(self):
        """Return reasonable bounds for the fitter.

        Raises
        ------
        RuntimeError
            If the model status is below what ``status_need_for_eval``
            requires.
        """
        self._assert_evaluable()

    def add_background(self):
        """Enable the background vector in the model evaluation."""
        self.has_background = True

    def remove_background(self):
        """Disable the background vector. A stored background vector is
        not deleted.
        """
        self.has_background = False
class LinearModel(Model):
name = 'LinearModel'
status_need_for_eval = 0
""" Basic Linear model:
g = A * f
Attributes
----------
name : str
Name of the model.
status : int
Indicates the status of the model:
-1 : Instance created. Not filled with values yet.
0 : Filled with values
dim_g :
Dimension of the histogrammed observable vector.
dim_f :
Dimension of the histogrammed truth vector.
range_obs : tuple (int, int)
Tuple containing the lowest and highest bin number used in
the digitized observable vector. For performance reasons it is
assumed that all numbers between min and max are used.
range_truth : tuple (int, int)
Tuple containing the lowest and highest bin number used in
the digitized truth vector. For performance reasons it is
assumed that all numbers between min and max are used.
A : numpy.array shape=(dim_g, dim_f)
Response matrix.
vec_b : numpy.array, shape=(dim_f)
Observable vector for the background.
has_background : boolean
Indicator if self.vec_b should be added to the model evaluationg
"""
def __init__(self):
super(LinearModel, self).__init__()
self.range_obs = None
self.range_truth = None
self.A = None
self.dim_f = None
self.dim_g = None
self.vec_b = None
def initialize(self, digitized_obs, digitized_truth, sample_weight=None):
"""
"""
super(LinearModel, self).initialize()
self.range_obs = (min(digitized_obs), max(digitized_obs))
self.range_truth = (min(digitized_truth), max(digitized_truth))
self.dim_f = self.range_truth[1] - self.range_truth[0] + 1
self.dim_g = self.range_obs[1] - self.range_obs[0] + 1
binning_g, binning_f = self.__generate_binning__()
self.A = np.histogram2d(x=digitized_obs,
y=digitized_truth,
bins=(binning_g, binning_f),
weights=sample_weight)[0]
M_norm = np.diag(1 / np.sum(self.A, axis=0))
self.A = np.dot(self.A, M_norm)
def evaluate(self, vec_fit):
"""Evaluating the model for a given vector f
Parameters
----------
vec_fit : numpy.array, shape=(dim_f,)
Vector f for which the model should be evaluated.
Returns
-------
vec_g : nump.array, shape=(dim_g,)
Vector containing the number of events in observable space.
If background was added the returned vector is A * vec_f + vec_b.
vec_f : nump.array, shape=(dim_f,)
Vector used to evaluate A * vec_f
vec_f_reg : nump.array, shape=(dim_f,)
Vector that should be passed to the regularization. For the
BasisLinearModel it is identical to f.
"""
super(LinearModel, self).evaluate()
vec_g = np.dot(self.A, vec_fit)
if self.has_background:
vec_g += self.vec_b
return vec_g, vec_fit, vec_fit
def generate_fit_x0(self, vec_g):
"""Generates a default seed for the minimization.
The default seed vec_f_0 is a uniform distribution with
sum(vec_f_0) = sum(vec_g). If background is present the default seed
is: sum(vec_f_0) = sum(vec_g) - sum(self.vec_b).
Parameters
----------
vec_g : np.array, shape=(dim_g)
Observable vector which should be used to get the correct
normalization for vec_f_0.
Returns
-------
vec_f_0 : np.array, shape=(dim_f)
Seed vector of a minimization.
"""
super(LinearModel, self).generate_fit_x0()
n = self.A.shape[1]
if self.has_background:
vec_f_0 = np.ones(n) * (np.sum(vec_g) - np.sum(self.vec_b)) / n
else:
vec_f_0 = np.ones(n) * np.sum(vec_g) / n
return vec_f_0
def generate_fit_bounds(self, vec_g):
"""Generates a bounds for a minimization.
The bounds are (0, sum(vec_g)) without background and
(0, sum(vec_g - self.vec_b)) with background. The bounds are for
each fit parameter/entry in f.
Parameters
----------
vec_g : np.array, shape=(dim_g)
Observable vector which should be used to get the correct
upper bound
Returns
-------
bounds : list, shape=(dim_f)
List of tuples with the bounds.
"""
super(LinearModel, self).generate_fit_bounds()
n = self.A.shape[1]
if self.has_background:
n_events = np.sum(vec_g) - np.sum(self.vec_b)
else:
n_events = np.sum(vec_g)
bounds = [(0, n_events)] * n
return bounds
def set_model_x0(self):
"""The LinearModel has no referenz model_x0.
"""
super(LinearModel, self).set_model_x0()
warnings.warn('\tx0 has no effect for {}'.format(self.name))
def evaluate_condition(self, normalize=True):
"""Returns an ordered array of the singular values of matrix A.
Parameters
----------
normalize : boolean (optional)
If True the singular values return relativ to the largest
value.
Returns
-------
S_values : np.array, shape=(dim_f)
Ordered array of the singular values.
"""
if self.status < 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
U, S_values, V = linalg.svd(self.A)
S_values = S_values / S_values[0]
return S_values
def __generate_binning__(self):
if self.status < 0:
raise RuntimeError("Model has to be intilized. "
"Run 'model.initialize' first!")
binning_obs = np.linspace(self.range_obs[0],
self.range_obs[1] + 1,
self.dim_g + 1)
binning_truth = np.linspace(self.range_truth[0],
self.range_truth[1] + 1,
self.dim_f + 1)
return binning_obs, binning_truth
def generate_vectors(self, digitized_obs=None, digitized_truth=None):
"""Returns vec_g, vec_f for digitized values. Either f, g or both
can be provided to the function.
Parameters
----------
digitized_obs : np.intarray (optional)
Array with digitized values form the observable space
digitized_truth : np.intarray (optinal)
Array with digitized values for the sought-after quantity.
Returns
-------
vec_g : None or np.array shape=(dim_g)
None if no digitized_obs was provided otherwise the histrogram
of digitized_obs.
vec_f : None or np.array shape=(dim_f)
None if no digitized_truth was provided otherwise the histrogram
of digitized_truth.
"""
binning_obs, binning_truth = self.__generate_binning__()
if digitized_obs is not None:
vec_g = np.histogram(digitized_obs, bins=binning_obs)[0]
else:
vec_g = None
if digitized_truth is not None:
vec_f = np.histogram(digitized_truth, bins=binning_truth)[0]
else:
vec_f = None
return vec_g, vec_f
def add_background(self, vec_b):
"""Adds a background vector to the model.
Parameters
----------
vec_b : numpy.array, shape=(dim_g)
Vector g which is added to the model evaluation.
"""
super(LinearModel, self).add_background()
self.vec_b = vec_b
class BiasedLinearModel(LinearModel):
    """Extends the LinearModel with a bias distribution model_x0.

    The fit vector is interpreted as an element-wise multiplier of the
    bias distribution:

        g = A * (model_x0 * vec_fit)

    Internally model_x0 is normalized in a way that

        vec_fit = [1.] * dim_f

    is transformed to

        vec_f = model_x0 / sum(model_x0) * sum(vec_g).

    Attributes
    ----------
    name : str
        Name of the model.
    model_x0 : np.array, shape=(dim_f)
        Distribution which is element-wise multiplied with vec_fit.
    status : int
        Indicates the status of the model:
            -1 : Instance created. Not filled with values yet.
             0 : Filled with values
    dim_g : int
        Dimension of the histogrammed observable vector.
    dim_f : int
        Dimension of the histogrammed truth vector.
    range_obs : tuple (int, int)
        Lowest and highest bin number used in the digitized observable
        vector. For performance reasons it is assumed that all numbers
        between min and max are used.
    range_truth : tuple (int, int)
        Lowest and highest bin number used in the digitized truth
        vector. For performance reasons it is assumed that all numbers
        between min and max are used.
    A : numpy.array shape=(dim_g, dim_f)
        Response matrix.
    vec_b : numpy.array, shape=(dim_f)
        Observable vector for the background.
    has_background : boolean
        Indicator if self.vec_b should be added to the model evaluation.
    """
    # NOTE: this description used to sit *below* the class attributes as a
    # bare string, where Python does not pick it up as the class docstring.
    name = 'BiasedLinearModel'
    status_need_for_eval = 1

    def __init__(self):
        super(LinearModel, self).__init__()
        self.range_obs = None
        self.range_truth = None
        self.A = None
        self.dim_f = None
        self.dim_g = None
        self.model_x0 = None
        # Normalization factors read by transform_vec_fit; they are updated
        # by set_model_x0 and add_background.
        self.model_factor_ = 1.
        self.background_factor_ = 0.
        self.vec_b = None

    def evaluate(self, vec_fit):
        """Evaluating the model for a given vector f

        Parameters
        ----------
        vec_fit : numpy.array, shape=(dim_f,)
            Vector f for which the model should be evaluated.

        Returns
        -------
        vec_g : numpy.array, shape=(dim_g,)
            Vector containing the number of events in observable space.
            If background was added the returned vector is A * vec_f + vec_b.
        vec_f : numpy.array, shape=(dim_f,)
            Vector used to evaluate A * vec_f
        vec_f_reg : numpy.array, shape=(dim_f,)
            Vector that should be passed to the regularization.
        """
        vec_f = self.transform_vec_fit(vec_fit)
        vec_g, _, _ = super(BiasedLinearModel, self).evaluate(vec_f)
        return vec_g, vec_f, vec_fit

    def transform_vec_fit(self, vec_fit):
        """Transforms the fit vector to the actual vec_f which is e.g.
        used to evaluate the model.

        Parameters
        ----------
        vec_fit : np.array, shape=(dim_f)
            Vector which should be transformed into an actual vec_f.

        Returns
        -------
        vec_f : np.array, shape=(dim_f)
            Vector in the space of the sought-after quantity.
        """
        # Effective normalization: total observed events minus background.
        eff_factor = self.model_factor_ - self.background_factor_
        vec_f = self.model_x0 * vec_fit * eff_factor
        return vec_f

    def generate_fit_x0(self, vec_g):
        """Generates a default seed for the minimization.

        Since vec_fit multiplies the bias distribution element-wise, the
        natural seed is simply a vector of ones.

        Parameters
        ----------
        vec_g : np.array, shape=(dim_g)
            Observable vector (unused here; kept for the common interface).

        Returns
        -------
        vec_f_0 : np.array, shape=(dim_f)
            Seed vector of a minimization.
        """
        super(LinearModel, self).generate_fit_x0()
        return np.ones(self.dim_f)

    def generate_fit_bounds(self, vec_g):
        """Generates bounds for a minimization.

        The bounds are (0, sum(vec_g)) without background and
        (0, sum(vec_g - self.vec_b)) with background, one tuple per fit
        parameter/entry in f.

        Parameters
        ----------
        vec_g : np.array, shape=(dim_g)
            Observable vector which should be used to get the correct
            upper bound.

        Returns
        -------
        bounds : list, shape=(dim_f)
            List of tuples with the bounds.
        """
        super(LinearModel, self).generate_fit_bounds()
        n = self.A.shape[1]
        if self.has_background:
            n_events = np.sum(vec_g) - np.sum(self.vec_b)
        else:
            n_events = np.sum(vec_g)
        bounds = [(0, n_events)] * n
        return bounds

    def set_model_x0(self, model_x0, vec_g):
        """Sets the model_x0. Also the vec_g of the unfolding is needed
        to center the fit around 1.

        Parameters
        ----------
        model_x0 : np.array, shape=(dim_f)
            Distribution used as a bias.
        vec_g : np.array, shape=(dim_g)
            Observable vector which is used to get the fit centered
            around 1.
        """
        super(LinearModel, self).set_model_x0()
        if len(model_x0) != self.dim_f:
            raise ValueError("'model_x0' has to be of the length as "
                             "vec_f!")
        # BUGFIX: store the factor under the trailing-underscore name that
        # transform_vec_fit actually reads. Previously the attribute
        # 'model_factor' (no underscore) was assigned, so the factor set
        # in __init__ (1.) was used forever and the bias normalization
        # never took effect.
        self.model_factor_ = sum(vec_g)
        self.model_x0 = model_x0 / self.model_factor_

    def add_background(self, vec_b):
        """Adds a background vector to the model.

        Parameters
        ----------
        vec_b : numpy.array, shape=(dim_g)
            Vector g which is added to the model evaluation.
        """
        super(LinearModel, self).add_background()
        self.vec_b = vec_b
        # BUGFIX: same trailing-underscore fix as in set_model_x0; the
        # attribute read by transform_vec_fit is 'background_factor_'.
        self.background_factor_ = np.sum(vec_b)
| [
"numpy.histogram",
"numpy.ones",
"numpy.sum",
"numpy.dot",
"numpy.linspace",
"scipy.linalg.svd",
"numpy.histogram2d"
] | [((5615, 5637), 'numpy.dot', 'np.dot', (['self.A', 'M_norm'], {}), '(self.A, M_norm)\n', (5621, 5637), True, 'import numpy as np\n'), ((6431, 6454), 'numpy.dot', 'np.dot', (['self.A', 'vec_fit'], {}), '(self.A, vec_fit)\n', (6437, 6454), True, 'import numpy as np\n'), ((9114, 9132), 'scipy.linalg.svd', 'linalg.svd', (['self.A'], {}), '(self.A)\n', (9124, 9132), False, 'from scipy import linalg\n'), ((9411, 9480), 'numpy.linspace', 'np.linspace', (['self.range_obs[0]', '(self.range_obs[1] + 1)', '(self.dim_g + 1)'], {}), '(self.range_obs[0], self.range_obs[1] + 1, self.dim_g + 1)\n', (9422, 9480), True, 'import numpy as np\n'), ((9573, 9646), 'numpy.linspace', 'np.linspace', (['self.range_truth[0]', '(self.range_truth[1] + 1)', '(self.dim_f + 1)'], {}), '(self.range_truth[0], self.range_truth[1] + 1, self.dim_f + 1)\n', (9584, 9646), True, 'import numpy as np\n'), ((15410, 15429), 'numpy.ones', 'np.ones', (['self.dim_f'], {}), '(self.dim_f)\n', (15417, 15429), True, 'import numpy as np\n'), ((17410, 17423), 'numpy.sum', 'np.sum', (['vec_b'], {}), '(vec_b)\n', (17416, 17423), True, 'import numpy as np\n'), ((5343, 5449), 'numpy.histogram2d', 'np.histogram2d', ([], {'x': 'digitized_obs', 'y': 'digitized_truth', 'bins': '(binning_g, binning_f)', 'weights': 'sample_weight'}), '(x=digitized_obs, y=digitized_truth, bins=(binning_g,\n binning_f), weights=sample_weight)\n', (5357, 5449), True, 'import numpy as np\n'), ((8224, 8237), 'numpy.sum', 'np.sum', (['vec_g'], {}), '(vec_g)\n', (8230, 8237), True, 'import numpy as np\n'), ((16206, 16219), 'numpy.sum', 'np.sum', (['vec_g'], {}), '(vec_g)\n', (16212, 16219), True, 'import numpy as np\n'), ((5574, 5596), 'numpy.sum', 'np.sum', (['self.A'], {'axis': '(0)'}), '(self.A, axis=0)\n', (5580, 5596), True, 'import numpy as np\n'), ((8152, 8165), 'numpy.sum', 'np.sum', (['vec_g'], {}), '(vec_g)\n', (8158, 8165), True, 'import numpy as np\n'), ((8168, 8186), 'numpy.sum', 'np.sum', (['self.vec_b'], {}), '(self.vec_b)\n', 
(8174, 8186), True, 'import numpy as np\n'), ((10700, 10745), 'numpy.histogram', 'np.histogram', (['digitized_obs'], {'bins': 'binning_obs'}), '(digitized_obs, bins=binning_obs)\n', (10712, 10745), True, 'import numpy as np\n'), ((10848, 10897), 'numpy.histogram', 'np.histogram', (['digitized_truth'], {'bins': 'binning_truth'}), '(digitized_truth, bins=binning_truth)\n', (10860, 10897), True, 'import numpy as np\n'), ((16134, 16147), 'numpy.sum', 'np.sum', (['vec_g'], {}), '(vec_g)\n', (16140, 16147), True, 'import numpy as np\n'), ((16150, 16168), 'numpy.sum', 'np.sum', (['self.vec_b'], {}), '(self.vec_b)\n', (16156, 16168), True, 'import numpy as np\n'), ((7304, 7314), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (7311, 7314), True, 'import numpy as np\n'), ((7394, 7404), 'numpy.ones', 'np.ones', (['n'], {}), '(n)\n', (7401, 7404), True, 'import numpy as np\n'), ((7407, 7420), 'numpy.sum', 'np.sum', (['vec_g'], {}), '(vec_g)\n', (7413, 7420), True, 'import numpy as np\n'), ((7318, 7331), 'numpy.sum', 'np.sum', (['vec_g'], {}), '(vec_g)\n', (7324, 7331), True, 'import numpy as np\n'), ((7334, 7352), 'numpy.sum', 'np.sum', (['self.vec_b'], {}), '(self.vec_b)\n', (7340, 7352), True, 'import numpy as np\n')] |
import numpy as np
import os
import pickle as p
import argparse
# Command-line interface: --result_dir points at the directory containing
# one pickled evaluation result file per scan.
parser = argparse.ArgumentParser()
parser.add_argument('--result_dir', type=str,required=True)
args = parser.parse_args()
def compute_iou(pred_box, ref_bbox):
    """Axis-aligned 3D IoU between N predicted boxes and one reference box.

    Boxes are encoded as (cx, cy, cz, dx, dy, dz): center coordinates
    followed by the full extent along each axis.

    Parameters
    ----------
    pred_box : np.ndarray, shape=(N, 6)
        Predicted boxes.
    ref_bbox : np.ndarray, shape=(1, 6)
        Reference (ground-truth) box.

    Returns
    -------
    iou : np.ndarray, shape=(N,)
        Intersection over union of every predicted box with the reference.
    """
    # Min and max corners of every box.
    pred_min = pred_box[:, 0:3] - pred_box[:, 3:6] / 2.0
    pred_max = pred_box[:, 0:3] + pred_box[:, 3:6] / 2.0
    ref_min = ref_bbox[:, 0:3] - ref_bbox[:, 3:6] / 2.0
    ref_max = ref_bbox[:, 0:3] + ref_bbox[:, 3:6] / 2.0
    # Broadcasting replaces the original concatenate/repeat/newaxis
    # construction: (N, 3) against (1, 3) broadcasts row-wise.
    overlap_max = np.minimum(pred_max, ref_max)
    overlap_min = np.maximum(pred_min, ref_min)
    whz = overlap_max - overlap_min
    whz[whz < 0] = 0  # no overlap along an axis -> zero extent
    inter = whz[:, 0] * whz[:, 1] * whz[:, 2]
    pred_box_area = pred_box[:, 3] * pred_box[:, 4] * pred_box[:, 5]
    ref_box_area = ref_bbox[:, 3] * ref_bbox[:, 4] * ref_bbox[:, 5]
    iou = inter / (pred_box_area + ref_box_area - inter)
    return iou
# Aggregate retrieval metrics over every pickled scan result in result_dir.
# Predictions per query are ranked by confidence; top-1 accuracy at IoU
# 0.25/0.5 and recall at top-2/5/10/20 (IoU > 0.5) are reported for both
# the intact and the partial bounding boxes.
result_dir = args.result_dir
k = 5  # unused; kept from the original script
success_count_iou25 = 0
success_count_iou50 = 0
Rat2_count = 0
Rat5_count = 0
Rat10_count = 0
Rat20_count = 0
total_count = 0
Max_IoU = 0
success_count = 0
iou_sum = 0
par_success_count_iou25 = 0
par_success_count_iou50 = 0
par_Rat2_count = 0
par_Rat5_count = 0
par_Rat10_count = 0
par_Rat20_count = 0
par_Max_IoU = 0
scan_list = os.listdir(result_dir)
for scan_file in scan_list:
    scan_output_file = os.path.join(result_dir, scan_file)
    scan = scan_file[:12]  # scan id prefix (not used below)
    with open(scan_output_file, "rb") as f:
        output_content = p.load(f)
    object_id_list = list(output_content.keys())
    for object_id in object_id_list:
        for object_data in output_content[object_id]:
            prediction = object_data["pred_intact_box"].T
            gt = object_data["gt_intact_bbox"].T
            partial_pred = object_data["pred_partial_box"]
            partial_gt = object_data["gt_partial_bbox"].T
            bbox = prediction[:, 0:6]
            partial_bbox = partial_pred[:, 0:6]
            # Rank predictions by descending confidence, keep the top 20.
            confidence = object_data["output"][:, 6]
            sort_id = np.argsort(-confidence)
            topk_id = sort_id[:20]
            topk_bbox = bbox[topk_id]
            topk_partial_bbox = partial_bbox[topk_id]
            target_bbox = gt[np.newaxis, :]
            target_partial_bbox = partial_gt[np.newaxis, :]
            iou = compute_iou(topk_bbox, target_bbox)
            par_iou = compute_iou(topk_partial_bbox, target_partial_bbox)
            par_Max_IoU += np.max(par_iou)
            Max_IoU += np.max(iou)
            # Top-1 accuracy at IoU thresholds (intact box).
            if iou[0] > 0.25:
                success_count_iou25 += 1
            if iou[0] > 0.5:
                success_count_iou50 += 1
                iou_sum += iou[0]
                success_count += 1
            # Recall at top-k (IoU > 0.5, intact box).
            if np.max(iou[0:2]) > 0.5:
                Rat2_count += 1
            if np.max(iou[0:5]) > 0.5:
                Rat5_count += 1
            if np.max(iou[0:10]) > 0.5:
                Rat10_count += 1
            if np.max(iou[0:20]) > 0.5:
                Rat20_count += 1
            # Same metrics for the partial box.
            if par_iou[0] > 0.25:
                par_success_count_iou25 += 1
            if par_iou[0] > 0.5:
                par_success_count_iou50 += 1
            if np.max(par_iou[0:2]) > 0.5:
                par_Rat2_count += 1
            if np.max(par_iou[0:5]) > 0.5:
                par_Rat5_count += 1
            if np.max(par_iou[0:10]) > 0.5:
                par_Rat10_count += 1
            if np.max(par_iou[0:20]) > 0.5:
                par_Rat20_count += 1
            total_count += 1
print("---------------intact_bbox--------------------------")
print("IoU25_success_rate:", success_count_iou25 / total_count)
print("IoU50_success_rate:", success_count_iou50 / total_count)
print("R@2:", Rat2_count / total_count)
print("R@5:", Rat5_count / total_count)
print("R@10:", Rat10_count / total_count)
print("R@20", Rat20_count / total_count)
print("Max_IoU", Max_IoU / total_count)
print("success mean IoU", iou_sum / success_count)
print("---------------partial_bbox--------------------------")
print("IoU25_success_rate:", par_success_count_iou25 / total_count)
print("IoU50_success_rate:", par_success_count_iou50 / total_count)
# BUGFIX: this line previously printed the intact-box Rat2_count, so the
# partial-box R@2 (par_Rat2_count) was computed but never reported.
print("R@2:", par_Rat2_count / total_count)
print("R@5:", par_Rat5_count / total_count)
print("R@10:", par_Rat10_count / total_count)
print("R@20", par_Rat20_count / total_count)
print("Max_IoU", par_Max_IoU / total_count)
| [
"os.listdir",
"numpy.repeat",
"argparse.ArgumentParser",
"os.path.join",
"pickle.load",
"numpy.max",
"numpy.argsort"
] | [((74, 99), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (97, 99), False, 'import argparse\n'), ((1462, 1484), 'os.listdir', 'os.listdir', (['result_dir'], {}), '(result_dir)\n', (1472, 1484), False, 'import os\n'), ((1578, 1613), 'os.path.join', 'os.path.join', (['result_dir', 'scan_file'], {}), '(result_dir, scan_file)\n', (1590, 1613), False, 'import os\n'), ((1720, 1729), 'pickle.load', 'p.load', (['f'], {}), '(f)\n', (1726, 1729), True, 'import pickle as p\n'), ((2379, 2402), 'numpy.argsort', 'np.argsort', (['(-confidence)'], {}), '(-confidence)\n', (2389, 2402), True, 'import numpy as np\n'), ((2809, 2824), 'numpy.max', 'np.max', (['par_iou'], {}), '(par_iou)\n', (2815, 2824), True, 'import numpy as np\n'), ((2846, 2857), 'numpy.max', 'np.max', (['iou'], {}), '(iou)\n', (2852, 2857), True, 'import numpy as np\n'), ((546, 597), 'numpy.repeat', 'np.repeat', (['ref_bbox_lt[:, :, np.newaxis]', 'N'], {'axis': '(0)'}), '(ref_bbox_lt[:, :, np.newaxis], N, axis=0)\n', (555, 597), True, 'import numpy as np\n'), ((671, 722), 'numpy.repeat', 'np.repeat', (['ref_bbox_rb[:, :, np.newaxis]', 'N'], {'axis': '(0)'}), '(ref_bbox_rb[:, :, np.newaxis], N, axis=0)\n', (680, 722), True, 'import numpy as np\n'), ((1059, 1093), 'numpy.repeat', 'np.repeat', (['ref_box_area', 'N'], {'axis': '(0)'}), '(ref_box_area, N, axis=0)\n', (1068, 1093), True, 'import numpy as np\n'), ((3071, 3087), 'numpy.max', 'np.max', (['iou[0:2]'], {}), '(iou[0:2])\n', (3077, 3087), True, 'import numpy as np\n'), ((3138, 3154), 'numpy.max', 'np.max', (['iou[0:5]'], {}), '(iou[0:5])\n', (3144, 3154), True, 'import numpy as np\n'), ((3205, 3222), 'numpy.max', 'np.max', (['iou[0:10]'], {}), '(iou[0:10])\n', (3211, 3222), True, 'import numpy as np\n'), ((3274, 3291), 'numpy.max', 'np.max', (['iou[0:20]'], {}), '(iou[0:20])\n', (3280, 3291), True, 'import numpy as np\n'), ((3493, 3513), 'numpy.max', 'np.max', (['par_iou[0:2]'], {}), '(par_iou[0:2])\n', (3499, 3513), True, 'import 
numpy as np\n'), ((3568, 3588), 'numpy.max', 'np.max', (['par_iou[0:5]'], {}), '(par_iou[0:5])\n', (3574, 3588), True, 'import numpy as np\n'), ((3643, 3664), 'numpy.max', 'np.max', (['par_iou[0:10]'], {}), '(par_iou[0:10])\n', (3649, 3664), True, 'import numpy as np\n'), ((3720, 3741), 'numpy.max', 'np.max', (['par_iou[0:20]'], {}), '(par_iou[0:20])\n', (3726, 3741), True, 'import numpy as np\n')] |
import numpy as np
import matplotlib.pyplot as plt
import cv2
# Play the input video while overlaying two tracked trajectories loaded
# from CSV (OpenCV result in blue, mirrored in y; reference in red).
fig, ax = plt.subplots()
x, y = np.loadtxt('resultcv.csv', delimiter=',', unpack=True)
x2, y2 = np.loadtxt('result.csv', delimiter=',', unpack=True)
cap = cv2.VideoCapture('../input/inputVideo.avi')
i = 0
# Stop once either trajectory runs out of points (avoids IndexError).
n_points = min(len(x), len(x2))
while cap.isOpened() and i < n_points:
    ret, frame = cap.read()
    if not ret:
        # BUGFIX: the read flag used to be ignored, so at end-of-stream a
        # None frame was passed to cv2.imshow and crashed the script.
        break
    ax.plot(x[i], -y[i], 'bo')
    ax.plot(x2[i], y2[i], 'ro')
    if i % 300 == 0:
        plt.pause(0.001)  # let matplotlib refresh occasionally
    cv2.imshow('img2', frame)
    i += 1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
cap.release()
cv2.destroyAllWindows()
| [
"cv2.imshow",
"cv2.destroyAllWindows",
"cv2.VideoCapture",
"matplotlib.pyplot.pause",
"numpy.loadtxt",
"cv2.waitKey",
"matplotlib.pyplot.subplots"
] | [((74, 88), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {}), '()\n', (86, 88), True, 'import matplotlib.pyplot as plt\n'), ((95, 149), 'numpy.loadtxt', 'np.loadtxt', (['"""resultcv.csv"""'], {'delimiter': '""","""', 'unpack': '(True)'}), "('resultcv.csv', delimiter=',', unpack=True)\n", (105, 149), True, 'import numpy as np\n'), ((158, 210), 'numpy.loadtxt', 'np.loadtxt', (['"""result.csv"""'], {'delimiter': '""","""', 'unpack': '(True)'}), "('result.csv', delimiter=',', unpack=True)\n", (168, 210), True, 'import numpy as np\n'), ((219, 262), 'cv2.VideoCapture', 'cv2.VideoCapture', (['"""../input/inputVideo.avi"""'], {}), "('../input/inputVideo.avi')\n", (235, 262), False, 'import cv2\n'), ((537, 560), 'cv2.destroyAllWindows', 'cv2.destroyAllWindows', ([], {}), '()\n', (558, 560), False, 'import cv2\n'), ((428, 453), 'cv2.imshow', 'cv2.imshow', (['"""img2"""', 'frame'], {}), "('img2', frame)\n", (438, 453), False, 'import cv2\n'), ((407, 423), 'matplotlib.pyplot.pause', 'plt.pause', (['(0.001)'], {}), '(0.001)\n', (416, 423), True, 'import matplotlib.pyplot as plt\n'), ((472, 486), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (483, 486), False, 'import cv2\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 15 14:59:18 2019
@author: m2
"""
import numpy as np
from math import hypot, atan2
from config import grndstep, grndstart, grndlen
from rectools import uv2xy, fitRecToMold, uvBound2xyNormal
series_distance_cutoff = .45 ** 2 # m
min_points_in_segment = 3
concave_split_cutoff = 1. # m
def segmentPoints(points):
    """Split an ordered point sequence into contiguous segments.

    Consecutive points further apart than sqrt(series_distance_cutoff)
    start a new segment.  A single point that causes a cut is dropped as
    an outlier when its neighbors are close enough to each other.
    Segments with fewer than min_points_in_segment points are discarded,
    and segments that bow inward by more than concave_split_cutoff are
    split at the most concave point.

    points : np.ndarray whose first two columns are x, y
    returns : list of np.ndarray (the surviving segments)
    """
    if len(points) < 3: return []
    # squared distance between consecutive points
    diff = np.diff(points[:,:2], axis=0)
    distsq = diff[:,0]*diff[:,0] + diff[:,1]*diff[:,1]
    # indices after which the gap exceeds the cutoff
    cuts = np.where(distsq > series_distance_cutoff)[0]
    if cuts.shape[0]==0:
        return [points] # all contiguous
    # can't think of a decent numpy way to do this next part
    # fixed_cuts[i] is the start index of segment i; outlier_idxs[i] are
    # indices (relative to the segment start) removed from segment i.
    fixed_cuts = [0]
    outlier_idxs = [[]]
    if cuts[0] == 0: # first point is outlier
        fixed_cuts.append(1)
        outlier_idxs.append([])
        cuts = cuts[1:]
    for cut in cuts:
        if cut == fixed_cuts[-1]:
            # this point was an outlier
            # distance between the outlier's two neighbors
            outlier_skipped = points[cut+1,:2]-points[cut-1,:2]
            outlier_skipped_distance = outlier_skipped[0]*outlier_skipped[0] +\
                            outlier_skipped[1]*outlier_skipped[1]
            if outlier_skipped_distance <= series_distance_cutoff:
                fixed_cuts.pop() # the cut was only due to this outlier
                outlier_idxs.pop()
            outlier_idxs[-1].append(cut - fixed_cuts[-1])
        else:
            fixed_cuts.append(cut+1)
            outlier_idxs.append([])
    if fixed_cuts[-1]==len(points)-1: # last element is an outlier
        outlier_idxs.pop() # just leave it out of the cuts
    else:
        fixed_cuts.append(len(points))
    n_cuts = len(fixed_cuts) - 1
    segs = [np.delete(points[fixed_cuts[idx]:fixed_cuts[idx+1]], outlier_idxs[idx], axis=0)
            for idx in range(n_cuts)]
    # remove segments that are too small
    # also, check for high concavity - possibly inward corner
    fixed_segs = []
    for seg in segs:
        if seg.shape[0] < min_points_in_segment:
            continue
        # line through the (smoothed) segment endpoints: ux + vy = c
        seg_beginning = (seg[0]+seg[1])/2
        seg_end = (seg[-2]+seg[-1])/2
        u = seg_end[1]-seg_beginning[1] # ux+vy=c
        v = seg_beginning[0]-seg_end[0]
        c = seg_end[1]*seg_beginning[0]-seg_end[0]*seg_beginning[1]
        uvlen = hypot(u,v)
        # unnormalized signed distance of each point from that line
        points_distconcave = u*seg[:,0]+v*seg[:,1]-c
        split_point = np.argmax(points_distconcave)
        if points_distconcave[split_point] > concave_split_cutoff * uvlen:
            #print("splitting on concavity {:.1f}".format(points_distconcave[split_point]))
            # keep each half only if it is still big enough on its own
            if split_point >= min_points_in_segment:
                fixed_segs.append(seg[:split_point])
            if split_point <= seg.shape[0]-min_points_in_segment:
                fixed_segs.append(seg[split_point:])
        else:
            fixed_segs.append(seg)
    return fixed_segs
min_edge_len = .05
def makeMeasurement(pts):
    """Fit a rectangle measurement (u, v, ulo, uhi, vlo, vhi) to a segment.

    For exactly two points the rectangle is aligned with the connecting
    line; for three or more points marShrinkSearch is used.  Every edge
    is forced to be at least min_edge_len long.
    """
    n_pts = pts.shape[0]
    if n_pts < 2:
        raise Exception("can't segment fewer than 2 points")
    if n_pts == 2:
        # unit vector along the two points
        du = pts[1][0] - pts[0][0]
        dv = pts[1][1] - pts[0][1]
        length = hypot(du, dv)
        du /= length
        dv /= length
        ulo = du*pts[0][0] + dv*pts[0][1]
        vlo = dv*pts[0][0] - du*pts[0][1]
        # pad symmetrically so the long edge reaches min_edge_len
        pad = max((min_edge_len - length)*.5, 0)
        return (du, dv,
                ulo - pad, ulo + length + pad,
                vlo, vlo + min_edge_len)
    c, s, ulo, uhi, vlo, vhi = marShrinkSearch(pts)
    # keep a minimum size for objects
    uhi = max(ulo + min_edge_len, uhi)
    vhi = max(vlo + min_edge_len, vhi)
    return (c, s, ulo, uhi, vlo, vhi)
halfpi = np.pi/2
shrinksearch_initres = halfpi / 5
shrinksearch_initangles = np.arange(0, halfpi, shrinksearch_initres)
shrinksearch_finalres = .03 # stop once resolution below this is used
def marShrinkSearch(pts):
    """Rectangle fit by coarse-to-fine orientation search.

    Candidate orientations are scored by bounding the points in the
    rotated frame and evaluating scoreVisibleArea; the angular
    resolution is halved around the best candidate until it drops below
    shrinksearch_finalres.

    pts : np.ndarray, shape=(n, 2) -- ordered segment points
    returns : (c, s, ulo, uhi, vlo, vhi) -- rotation (cos, sin) and
        bounds of the best-scoring rectangle
    """
    # anchor the quarter-turn search window at the last point's azimuth
    angle_limit = np.arctan2(pts[-1,1], pts[-1,0])
    res = shrinksearch_initres
    checks = shrinksearch_initangles + angle_limit
    best_fit = 1e6
    best_angle = None
    best_rec = None
    while res > shrinksearch_finalres:
        for angle in checks:
            c,s = np.cos(angle), np.sin(angle)
            # rotate points into the candidate frame
            u = c*pts[:,0] + s*pts[:,1]
            v = s*pts[:,0] - c*pts[:,1]
            uhi = max(u)
            vhi = max(v)
            ulo = min(u)
            vlo = min(v)
            fit = scoreVisibleArea(u,v, ulo,uhi,vlo,vhi)
            if fit < best_fit:
                best_fit = fit
                best_rec = (c, s, ulo,uhi,vlo,vhi)
                best_angle = angle
        res *= .5
        # refine: probe one halved step on either side of the best angle,
        # wrapped into [angle_limit, angle_limit + pi/2)
        checks = ((best_angle+res-angle_limit)%halfpi + angle_limit,
                  (best_angle-res-angle_limit)%halfpi + angle_limit)
    return best_rec
def scoreArea(u, v, ulo, uhi, vlo, vhi):
    """Plain rectangle area; the point arrays u and v are unused."""
    width = uhi - ulo
    height = vhi - vlo
    return width * height
def scoreSumDist(u, v, ulo, uhi, vlo, vhi):
    """Sum over all points of the distance to the nearest rectangle edge."""
    edge_dist = np.minimum(np.minimum(u - ulo, uhi - u),
                           np.minimum(v - vlo, vhi - v))
    return sum(edge_dist)
def scoreVisibleDist(u, v, ulo, uhi, vlo, vhi):
    """Summed distance of the points to the visible (near) edges.

    When the u range straddles the origin (ulo < 0) only the v edge
    counts; otherwise the nearer of the u-lower and v-lower edges is
    used for each point.
    """
    if ulo >= 0:
        dists = np.minimum(u - ulo, v - vlo)
    else:
        dists = v - vlo
    return sum(dists)
def scoreVisibleArea(u,v, ulo,uhi,vlo,vhi):
v = v - vlo
vhi -= vlo
area = u[1:].dot(v[:-1])
area -= u[:-1].dot(v[1:])
area += u[-1]*v[-1]
area -= u[0]*v[0]
if ulo < 0:
area += (u[0]-ulo)*v[0]*2
else:
area += (u[0]-ulo)*vhi*2
area += (uhi-u[-1])*v[-1]*2
return area
""" assuming laser forms a cone directly downward (which is close)
finds distance in each angle
as of 8/18/19 returns distance instead of boolean
"""
groundforlaser_angles = np.arange(-1.35, 1.4, .1)
lowestdistance = 4.
def getGroundForLaser(ground, laser_tan, laser_height=1.65, max_distance = 50.):
ground_present = np.zeros(groundforlaser_angles.shape)
tilexmin = grndstart[0]*grndstep[0]
tilexmax = tilexmin + grndlen[0]*grndstep[0]
tileymin = grndstart[1]*grndstep[1]
tileymax = tileymin + grndlen[1]*grndstep[1]
for angle_idx, angle in enumerate(groundforlaser_angles):
ax = np.cos(angle)
ay = np.sin(angle)
x,y = 0,0
distance = lowestdistance
x,y = ax*distance, ay*distance
grounddistance = 1e3
while x>tilexmin and x<tilexmax and y>tileymin and y<tileymax:
z = laser_height + laser_tan*distance
tilex = int(x/grndstep[0])-grndstart[0]
tiley = int(y/grndstep[1])-grndstart[1]
height = ground[tilex, tiley].dot((x,y,z,-1))
if height <= 0 or height > 2.1:
grounddistance = distance
break
distance += 2.
x,y = ax*distance, ay*distance
ground_present[angle_idx] = grounddistance
return ground_present
inf = 1e3 # meters, suitably large number
angle_acceptable_overlap = .03 # radians
occlusion_resolution = .01 # radians
def makeOcclusionMap(segs, segsareground, recs, starting_angle, ending_angle,
                     hard_ends = True, ground_present=None, ground_points=None):
    """
    inputs:
        segs = segmented points (only first and last are used, except for ground segs)
        segsareground = boolean list of whether each segment is ground
        recs = rectangle approximation of each segment, standardized
        starting_angle = in radians, beginning of visible range
        ending_angle = in radians, end of visible range
        hard_ends = whether msmts extending beyond starting or ending angles
                    are considered occluded or not
        ground_present
            if None: absorption is ignored (treated like absence of object)
            else: array of ground distances per laser angle
                  (see getGroundForLaser)
        ground_points = currently unused
    output:
        Nx5 array, columns = (angle, distance1, distance2, cw segment idx, cc segment idx)
        objects occluded the line segments between each row
        for instance if row 5 = (theta1, d1, d2) and row6 = (theta2, d3, d4)
        there is an occluding line segment between cos(theta1)*d2, sin(theta1)*d2
        and cos(theta2)*d3, sin(theta2)*d3
    """
    if len(segs) == 0: return np.zeros((0,5))
    # three pivots per segment (first point, corner, last point) plus the
    # two range ends
    occlusion_map = np.zeros((len(segs)*3+2, 5))
    end_distances = 0. if hard_ends else inf
    #starting_angle = -1.35
    #ending_angle = 1.35
    occlusion_map[0] = (starting_angle, end_distances, inf, -1, -1)
    occlusion_map[-1] = (ending_angle, inf, end_distances, len(segs), len(segs))
    for seg_idx in range(len(segs)):
        seg = segs[seg_idx]
        seg_is_ground = segsareground[seg_idx]
        rec = recs[seg_idx]
        first_point = seg[0]
        last_point = seg[-1]
        first_angle = atan2(first_point[1], first_point[0])
        last_angle = atan2(last_point[1], last_point[0])
        if seg_is_ground:
            # for concave ground segments, use farther point for occlusion distance
            median_point = seg[len(seg) // 2]
            first_dist = hypot(median_point[0], median_point[1])
            last_dist = first_dist
            corner_angle = atan2(median_point[1], median_point[0])
            corner_dist = first_dist
        else:
            # for convex object detections, use endpoints
            # will also use midpoint taken from rectangle fit, later
            first_dist = hypot(first_point[0], first_point[1])
            last_dist = hypot(last_point[0], last_point[1])
            if rec[2] > 0:
                # two sides visible
                # corner of the fitted rectangle in world coordinates
                corner_x = rec[0]*rec[2] + rec[1]*rec[4] # major error fixed 8/20
                corner_y = rec[1]*rec[2] - rec[0]*rec[4]
                corner_angle = atan2(corner_y, corner_x)
                # only keep the corner pivot if it is angularly strictly
                # between the endpoints
                if (corner_angle > first_angle + occlusion_resolution and
                        last_angle > corner_angle + occlusion_resolution):
                    corner_dist = hypot(corner_y, corner_x)
                else:
                    corner_angle = first_angle
                    corner_dist = first_dist
            else:
                # flat object
                corner_angle = first_angle
                corner_dist = first_dist
        occlusion_map[seg_idx*3+1] = (first_angle, inf, first_dist, seg_idx, seg_idx)
        occlusion_map[seg_idx*3+2] = (corner_angle, corner_dist, corner_dist,
                     seg_idx, seg_idx)
        occlusion_map[seg_idx*3+3] = (last_angle, last_dist, inf, seg_idx, seg_idx)
    # prune and search for errors
    # merge pivots closer than occlusion_resolution; small overlaps up to
    # angle_acceptable_overlap are tolerated, larger ones are an error
    new_map = []
    point = tuple(occlusion_map[0])
    for nextpoint in occlusion_map[1:]:
        radian_distance = nextpoint[0] - point[0]
        if radian_distance > occlusion_resolution:
            new_map.append(point)
            point = tuple(nextpoint)
        elif radian_distance > -angle_acceptable_overlap:
            point = (point[0], point[1], nextpoint[2], point[3], nextpoint[4])
        else:
            raise Exception("overlap in occlusion map")
    new_map.append(point)
    occlusion_map = new_map
    # taking completely empty chunks and replacing with ground bounds
    # ground points may be absorbed, or ignored by preprocessing choice
    # previous codes include more absorption reasoning --- not using atm
    # aka some empty (high distance) regions may actual be absorbed detections
    if ground_present is not None:
        newmap = []
        prevpivot = occlusion_map[0]
        for seg_idx in range(1, len(occlusion_map)):
            nextpivot = occlusion_map[seg_idx]
            if prevpivot[2] != inf:
                # not an empty (infinite-distance) gap; keep as is
                newmap.append(prevpivot)
                prevpivot = nextpivot
                continue
            assert nextpivot[1]==inf
            first_seg = prevpivot[4]
            last_seg = nextpivot[3]
            assert first_seg < last_seg
            first_angle = prevpivot[0]
            last_angle = nextpivot[0]
            # skip gaps that reach outside the tabulated laser angles
            if first_angle-.05 < groundforlaser_angles[0]:
                newmap.append(prevpivot)
                prevpivot = nextpivot
                continue
            if last_angle+.049 > groundforlaser_angles[-1]:
                newmap.append(prevpivot)
                prevpivot = nextpivot
                continue
            groundfirstidx = np.searchsorted(groundforlaser_angles, first_angle-.05)
            groundlastidx = np.searchsorted(groundforlaser_angles, last_angle-.05)
            if ground_present[groundfirstidx] < inf:
                # cap the gap's far distance with the ground distance
                prevpivot = (prevpivot[:2] + (ground_present[groundfirstidx],)
                             + prevpivot[3:])
            for groundidx in range(groundfirstidx, groundlastidx):
                # synthetic pivot per tabulated angle; .5 marks "between
                # segments first_seg and first_seg+1"
                newpivot = (groundforlaser_angles[groundidx],
                            prevpivot[2], ground_present[groundidx+1],
                            first_seg+.5, first_seg+.5)
                if newpivot[2] < inf or newpivot[3] < inf:
                    newmap.append(prevpivot)
                    prevpivot = newpivot
                # otherwise, don't need to repeat infinite segments
            # NOTE(review): `groundlastidx > len(ground_present)` looks
            # suspicious -- if it were ever true the very next index
            # would be out of range; possibly `<` was intended. Confirm.
            if (groundlastidx > len(ground_present) and
                    ground_present[groundlastidx] < inf):
                nextpivot = (nextpivot[:1] + (ground_present[groundlastidx],)
                             + nextpivot[2:])
            newmap.append(prevpivot)
            prevpivot = nextpivot
        newmap.append(prevpivot)
        occlusion_map = newmap
    return np.array(occlusion_map)
# line is given by direction (u, v) with constant v*x - u*y = c
# ray goes from the origin to (x0, y0)
def lineCrossesRay(u, v, c, x0, y0):
    """Test whether the line {(x, y): v*x - u*y == c} crosses the segment
    from the origin to (x0, y0).

    Returns (hit, t) where t is the projection of the crossing point onto
    the line direction (u, v); (False, 0.) when parallel or out of range.
    """
    denom = x0*v - y0*u
    if abs(denom) < 1e-5:
        # ray is (nearly) parallel to the line
        return False, 0.
    frac = c / denom
    # frac < 0: crossing behind the origin; frac > 1: beyond (x0, y0)
    if 0 <= frac <= 1:
        return True, (x0*u + y0*v)*frac
    return False, 0.
""" how much can line extend in gap between lidar points?
for del_theta, u, v, extension del_v is
(u^2 + v^2) sin(del_theta) / (ucos(dth)-vsin(dth))
"""
# multiply by two in case point outlier/failure
correction_angle_resolution = .0031 * 2
jump_at = series_distance_cutoff**.5
def _angleCorrection(u, v):
u = abs(u)
if u < 1e-3:
if abs(v) < 1e-3:
# this object is basically touching the origin... shouldn't happen
return 0.
return inf # very big
else:
return correction_angle_resolution*(u*u + v*v)/u
# 1/9/2019 same as above, but makes the angle a little more Xtreme
# 1/14/2019 accounts for segmentation strategy, high addition if jump > segment_cutoff
def angleCorrection(u, v):
u = max(.001, abs(u)-.02)
if u < 1e-2 and abs(v) < 1e-2:
return 0. # object is basically touching the origin... shouldn't happen
jump = correction_angle_resolution*(u*u+v*v)/u
if jump > jump_at*2: # segmentation failure pretty much guaranteed
# print("high jump")
jump = inf
elif jump > jump_at*1.5: # segmentation failure possible # EDIT 2/7/19 to 1.5
jump += jump_at*2
return jump
""" reverse of angleCorrection, assuming small angle again """
def angleGap(u, vlo, vhi):
if vlo < 0: return 1. # high angle
denom = u*u+vlo*vhi
if denom <= 0: return 1.
return (vhi-vlo)*u/denom
visibility_cutoff = .003 # radians
def boundMeasurement(seg_idx, rec, occlusion_map):
    """Extend a segment measurement with outer, occlusion-aware bounds.

    Parameters:
        seg_idx: index of this segment in the sweep; used to locate it in
            the occlusion map via searchsorted on columns 3 and 4.
        rec: (u, v, max_ulo, min_uhi, max_vlo, min_vhi) -- a direction
            (u, v) plus inner bounds along it and its perpendicular
            (presumably unit-length, as it is used for projections --
            TODO confirm against makeMeasurement).
        occlusion_map: array from makeOcclusionMap; column 0 is an angle
            and columns 1/2 are distances (used as cos/sin * distance
            here), columns 3/4 segment-index ranges.

    Returns a 10-tuple (u, v, max_ulo, min_uhi, max_vlo, min_vhi,
    min_ulo, max_uhi, min_vlo, max_vhi): the inner bounds plus outer
    bounds derived from occluding objects and from the finite lidar
    angular resolution (angleCorrection).
    """
    u, v, max_ulo, min_uhi, max_vlo, min_vhi = rec
    # if rec is too small, convert so it is straight
    # this way, behind-occlusion is still accounted for
    if min_uhi - max_ulo < .1 and min_vhi - max_vlo < .1:
        centeru = (max_ulo+min_uhi)/2
        centerv = (max_vlo+min_vhi)/2
        centerx = centeru*u+centerv*v
        centery = centeru*v-centerv*u
        uvlen = hypot(centerx, centery)
        # re-express as a 10cm sliver perpendicular to the line of sight
        rec = (-centery/uvlen, centerx/uvlen, -.05, .05, uvlen-.05, uvlen+.05)
        u, v, max_ulo, min_uhi, max_vlo, min_vhi = rec
    # full format = includes minimum and maximum bounds for each line
    min_ulo = -inf
    max_uhi = inf
    min_vlo = -inf
    max_vhi = inf
    # for occlusion purposes, find which sides are visible
    if max_ulo <= 0:
        uvisible = True
        vvisible = False
    else:
        # compare the angles each face subtends; a face narrower than
        # visibility_cutoff is treated as edge-on (not visible)
        angle_u = angleGap(max_vlo, max_ulo, min_uhi)
        angle_v = angleGap(max_ulo, max_vlo, min_vhi)
        if angle_u < angle_v and angle_u < visibility_cutoff:
            uvisible = False
            vvisible = True
        elif angle_v < visibility_cutoff:
            uvisible = True
            vvisible = False
        else:
            uvisible = True
            vvisible = True
    if uvisible:
        min_vlo = max_vlo
    if vvisible:
        min_ulo = max_ulo
    # convert occlusion map into form useful for bounding
    # the way we are currently bounding is:
    # for each jump-segment (moving between occluding objects), see if this obj is visible
    # only the further end of these jump segments is needed
    # not using the actual object segments
    # this will result in higher bounds, but never no bounds
    further_end = np.maximum(occlusion_map[:,1], occlusion_map[:,2])
    occlusion_map_points = np.array((np.cos(occlusion_map[:,0])*further_end,
                                     np.sin(occlusion_map[:,0])*further_end)).T
    # bound counter-clockwise first
    bounding_cw = True
    map_idx = np.searchsorted(occlusion_map[:,4], seg_idx)
    occ_x = np.cos(occlusion_map[map_idx,0])*occlusion_map[map_idx,1]
    occ_y = np.sin(occlusion_map[map_idx,0])*occlusion_map[map_idx,1]
    if vvisible:
        is_bound, bound = lineCrossesRay(v, -u, -max_ulo, occ_x, occ_y)
        if is_bound:
            max_vhi = min_vhi#bound
            bounding_cw = False
    else:
        is_bound, bound = lineCrossesRay(u, v, max_vlo, occ_x, occ_y)
        if is_bound:
            min_ulo = max_ulo#bound
            bounding_cw = False
#    if uvisible:
#        # 9/7/19 go ahead and do a simple bound on vhi using max_ulo (liberal)
#        # will get rid of extreme cases of lines seen as rectangles
#        is_bound, bound = lineCrossesRay(v, -u, -max_ulo, occ_x, occ_y)
#        if is_bound:
#            max_vhi = min_vhi
    # find this measurement's index in the occlusion map
    if bounding_cw and map_idx > 0:
        # walk through earlier occlusion points until one blocks the face
        for occ_x, occ_y in occlusion_map_points[map_idx-1::-1]:
            # check occluded segment plane that extends in cw direction
            if vvisible:
                is_bound, bound = lineCrossesRay(v, -u, -max_ulo, occ_x, occ_y)
                if is_bound:
                    max_vhi = bound
                    # the first located boundary should be the closest
                    break
            else:
                is_bound, bound = lineCrossesRay(u, v, max_vlo, occ_x, occ_y)
                if is_bound:
                    min_ulo = bound
                    break
    # next bound clockwise
    bounding_cc = True
    map_idx = np.searchsorted(occlusion_map[:,3], seg_idx+.1)-1
    occ_x = np.cos(occlusion_map[map_idx,0])*occlusion_map[map_idx,2]
    occ_y = np.sin(occlusion_map[map_idx,0])*occlusion_map[map_idx,2]
    if uvisible:
        is_bound, bound = lineCrossesRay(u, v, max_vlo, occ_x, occ_y)
        if is_bound:
            max_uhi = min_uhi#bound
            bounding_cc = False
    else:
        is_bound, bound = lineCrossesRay(v, -u, -max_ulo, occ_x, occ_y)
        if is_bound:
            min_vlo = max_vlo#bound
            bounding_cc = False
#    if vvisible:
#        # 9/7/19 go ahead and do a simple bound on vhi using max_ulo (liberal)
#        # will get rid of extreme cases of lines seen as rectangles
#        is_bound, bound = lineCrossesRay(u, v, max_vlo, occ_x, occ_y)
#        if is_bound:
#            max_uhi = min_uhi
    if bounding_cc:
        for occ_x, occ_y in occlusion_map_points[map_idx+1:]:
            if uvisible:
                is_bound, bound = lineCrossesRay(u, v, max_vlo, occ_x, occ_y)
                if is_bound:
                    max_uhi = bound
                    break
            else:
                is_bound, bound = lineCrossesRay(v, -u, -max_ulo, occ_x, occ_y)
                if is_bound:
                    min_vlo = bound
                    break
    # if restraints in any direction are small, remove them
    if max_ulo - min_ulo < 0:
        min_ulo = max_ulo
    if max_uhi - min_uhi < 0:
        max_uhi = min_uhi
    if max_vlo - min_vlo < 0:
        min_vlo = max_vlo
    if max_vhi - min_vhi < 0:
        max_vhi = min_vhi
    # add corrections for finite lidar resolution
    if uvisible:
        max_uhi += angleCorrection(min_vlo, max_uhi)
    else:
        min_vlo -= angleCorrection(min_ulo, min_vlo)
    if vvisible:
        max_vhi += angleCorrection(min_ulo, max_vhi)
    else:
        min_ulo -= angleCorrection(min_vlo, min_ulo)
    # if restraints in any direction are small, remove them
    assert max_ulo >= min_ulo
    assert max_uhi >= min_uhi
    assert max_vlo >= min_vlo
    assert max_vhi >= min_vhi
    return (u,v, max_ulo, min_uhi, max_vlo, min_vhi,
            min_ulo, max_uhi, min_vlo, max_vhi)
"""
takes bounded rectangle, checks against car mold, and finds normal distribution
over xyalw parameters
"""
#variance_multiplier = 1.2
#msmt_noise = np.array((.2,.2,.25,.2,.2))
car_dims = ((2.95,4.9),(1.35,1.9),(1.25,2.))
car_dim_min_len, car_dim_max_len = car_dims[0][0]/2, car_dims[0][1]/2*1.2
car_dim_min_wid, car_dim_max_wid = car_dims[1][0]/2, car_dims[1][1]/2*1.2
def msmtBound2msmtNormal(rec):
    """Convert a bounded rectangle measurement into normal distributions
    over (x, y, angle, half-length, half-width).

    Both orientation hypotheses are tried against the car size mold:
    u-axis as length / v-axis as width, and vice versa.  Each hypothesis
    is returned as (mean, cov) if the bounds can contain a car that way,
    or None otherwise; the result is the pair (uvout, vuout).
    """
    u,v,maxulo,minuhi,maxvlo,minvhi,minulo,maxuhi,minvlo,maxvhi = rec
    minulo = max(minulo, maxulo - 12) # otherwise, can have no idea where center is
    maxuhi = min(maxuhi, minuhi + 12)
    minvlo = max(minvlo, maxvlo - 12)
    maxvhi = min(maxvhi, minvhi + 12)
    # rotation from the (u, v) box frame into world x-y
    uvT = np.array(((u,v),(v,-u)))
    # hypothesis 1: u-axis is the car's length direction
    uvworks = (maxuhi-minulo > car_dim_min_len*2 and
               minuhi-maxulo < car_dim_max_len*2 and
               maxvhi-minvlo > car_dim_min_wid*2 and
               minvhi-maxvlo < car_dim_max_wid*2)
    if uvworks:
        umean, ucov = uvBound2xyNormal(minulo,maxulo,minuhi,maxuhi,
                                       car_dim_min_len, car_dim_max_len)
        vmean, vcov = uvBound2xyNormal(minvlo,maxvlo,minvhi,maxvhi,
                                       car_dim_min_wid, car_dim_max_wid)
        xymean = np.array((umean[0]*u+vmean[0]*v, umean[0]*v-vmean[0]*u,
                           np.arctan2(v,u), umean[1], vmean[1]))
        # scatter the per-axis 2x2 covariances into the 5x5 (x,y,ang,l,w)
        # covariance, then rotate the positional part into world frame
        xycov = np.zeros((5,5))
        xycov[[0,0,3,3],[0,3,0,3]] = ucov.reshape((-1,))
        xycov[[1,1,4,4],[1,4,1,4]] = vcov.reshape((-1,))
        xycov[:2,:] = uvT.dot(xycov[:2,:])
        xycov[:,:2] = xycov[:,:2].dot(uvT.T)
        uvout = (xymean, xycov)
    else:
        uvout = None
    # hypothesis 2: v-axis is the car's length direction
    vuworks = (maxvhi-minvlo > car_dim_min_len*2 and
               minvhi-maxvlo < car_dim_max_len*2 and
               maxuhi-minulo > car_dim_min_wid*2 and
               minuhi-maxulo < car_dim_max_wid*2)
    if vuworks:
        vmean, vcov = uvBound2xyNormal(minvlo,maxvlo,minvhi,maxvhi,
                                       car_dim_min_len, car_dim_max_len)
        umean, ucov = uvBound2xyNormal(minulo,maxulo,minuhi,maxuhi,
                                       car_dim_min_wid, car_dim_max_wid)
        xymean = np.array((umean[0]*u+vmean[0]*v, umean[0]*v-vmean[0]*u,
                           np.arctan2(-u,v), vmean[1], umean[1]))
        xycov = np.zeros((5,5))
        xycov[[0,0,4,4],[0,4,0,4]] = ucov.reshape((-1,))
        xycov[[1,1,3,3],[1,3,1,3]] = vcov.reshape((-1,))
        xycov[:2,:] = uvT.dot(xycov[:2,:])
        xycov[:,:2] = xycov[:,:2].dot(uvT.T)
        vuout = (xymean, xycov)
    else:
        vuout = None
    return uvout, vuout
"""
look up point height in ground grid
if point z not given, return elevation (height from cam pov at ground)
else return height above ground
"""
def getGrndHeight(ground, x, y, z=None):
    """Look up the ground-plane tile covering (x, y).

    With z omitted, returns the ground elevation at (x, y); with z given,
    returns the signed height of (x, y, z) relative to that plane.
    Tile indices are clamped to the grid (grndstep/grndstart/grndlen
    module configuration).
    """
    ix = min(max(int(x/grndstep[0]) - grndstart[0], 0), grndlen[0]-1)
    iy = min(max(int(y/grndstep[1]) - grndstart[1], 0), grndlen[1]-1)
    a, b, c, d = ground[ix, iy][:4]
    if z is None:
        return d - a*x - b*y
    return a*x + b*y + c*z - d
" lidar properties "
laser_angles = np.arange(0., 64)
laser_angles[:32] = -.33333*laser_angles[:32] + 3.
laser_angles[32:] = -.5*laser_angles[32:] + laser_angles[31] + 31*.5
laser_angles = np.tan(laser_angles * np.pi/180.)
#laser_angles = laser_angles[lasers]
laser_angles += -.02 # correction for observed angles
laser_intercepts = np.zeros(64)
laser_intercepts[:32] = .209 - .00036*np.arange(32,dtype=float)
laser_intercepts[32:] = .126 - .00032*np.arange(32,dtype=float)
laser_intercepts += 1.65
# laser angle space * current laser gap * some multiplier for missing lasers
height_angle_slack = .01*4*2
lasers = list(range(55))
laser_angles = laser_angles[lasers]
laser_intercepts = laser_intercepts[lasers]
## quanergy
#lasers2use = [0,6,15,25,33,39,45,50]
# more practical for camera fusion case, doesn't cover nearby objects that are easy
#lasers2use = [1,7,13,19,25]#,31,37]
lasers2use = []#[1,6,11,16,21,26]
"""
make sets of normally-distributed measurements from lidar
data = nX3 array as from kitti dataset
calib = 4x4 lidar-to-camera transformation matrix
ground = grid (size specified in config) of planes
view = tangent (lat/lon) of image view, or desired lidar view
"""
mindetectedlength4box = 1.
def processLidar(data, calib, ground, view, laser2use):
    """Process one lidar laser sweep into box and fragment measurements.

    Parameters:
        data: nX3 point array as from the kitti dataset (per module doc).
        calib: 4x4 lidar-to-camera transformation matrix.
        ground: grid of ground planes (see getGrndHeight).
        view: tangent of the desired lidar view angle.
        laser2use: index of the laser ring to extract from data.

    Returns (detectzone, occlusion_map[:,:3], boxmsmts, fragmsmts) where
    boxmsmts are (mean, cov) normals over box parameters and fragmsmts
    are xy positions of small unboxable segments.
    """
#    # get angles of all ground points
#    # this is cheating and using all 64 lasers atm
#    # but making it use only available lasers requires two separate loops...
#    # also consider not using pt-based check for ground in occlusion, just pass []
#    heights = data.dot(calib[2,:3]) + calib[2,3]
#    groundpts = (heights < .1) & (data[:,0] > .1)
#    groundptangles = np.arctan2(data[groundpts,1], data[groundpts,0])
#    groundptangles.sort()
    # separate points by laser
    # this will be repeated for every laser, and is pretty slow...
    # easy target for speedup if necessary
    starts = np.where(np.diff(np.sign(data[:,1])) > 0)[0]
    starts = np.concatenate(([0], starts+1, [len(data)]))
    true_starts = np.append(np.diff(starts) > 2, [True])
    starts = starts[true_starts]
    assert starts.shape[0] > 55
    # subselect useful data from laser
    pts = data[starts[laser2use]:starts[laser2use+1]]
    include = pts[:,0] > 0
    include &= abs(pts[:,1]) < pts[:,0]*view + 2.
    include &= pts.dot(calib[2,:3]) + calib[2,3] > -.3
    pts = pts[include]
    # ensure sweep is contiguous
    swap_idx = np.where(np.diff(np.arctan2(pts[:,1],pts[:,0]))<-.05)[0]
    assert len(swap_idx) <= 1
    if len(swap_idx) == 1:
        swap_idx = swap_idx[0] + 1
        pts = np.append(pts[swap_idx:], pts[:swap_idx], axis=0)
    segs = segmentPoints(pts)
    # classify segments by ground, worth using, etc
    msmts = []
    segsareground = []
    segsinclude = []
    for segidx, seg in enumerate(segs):
        segmiddle = np.mean(seg,axis=0)
        segmiddle = calib[:3,:3].dot(segmiddle) + calib[:3,3]
        seggroundelev = getGrndHeight(ground, segmiddle[0], segmiddle[1])
        heights = seg.dot(calib[2,:3])+calib[2,3]
        # ground segment: everything within 30cm of the ground plane
        segisground = max(heights)-seggroundelev < .3
        # only keep non-ground segments whose center is below 2m elevation
        seginclude = (not segisground) and segmiddle[2]-seggroundelev < 2.
        ## 9/17/19 LAST DITCH
        #seginclude = (not segisground) and segmiddle[2]-seggroundelev < 1.5
        segsareground.append(segisground)
        segsinclude.append(seginclude)
        msmts.append(makeMeasurement(seg))
    # calculate/approximate occlusion along this laser sweep
    ground_present = getGroundForLaser(ground, laser_angles[laser2use])
    starting_angle = min(-view, np.arctan2(pts[0,1],pts[0,0])) # was view-.1
    ending_angle = max(view, np.arctan2(pts[-1,1],pts[-1,0])) # put outside call
    occlusion_map = makeOcclusionMap(segs, segsareground, msmts,
                        starting_angle, ending_angle, True, ground_present)
    boxmsmts = []
    fragmsmts = []
    for seg_idx in range(len(segs)):
        if not segsinclude[seg_idx]:
            continue
        msmt = boundMeasurement(seg_idx, msmts[seg_idx], occlusion_map)
        if max(msmt[5]-msmt[4],msmt[3]-msmt[2]) < mindetectedlength4box:
            msmt1,msmt2 = None,None
        else:
            msmt1, msmt2 = msmtBound2msmtNormal(msmt)
        if msmt1 is not None:
            # rotate/translate mean and covariance into the calib xy frame
            mean, cov = msmt1
            mean[:2] = calib[:2,:2].dot(mean[:2]) + calib[:2,2]
            mean[2] += np.arctan2(calib[1,0],calib[0,0])
            cov[:2,:] = calib[:2,:2].dot(cov[:2,:])
            cov[:,:2] = cov[:,:2].dot(calib[:2,:2].T)
            boxmsmts.append((mean, cov))
        if msmt2 is not None:
            mean, cov = msmt2
            mean[:2] = calib[:2,:2].dot(mean[:2]) + calib[:2,2]
            mean[2] += np.arctan2(calib[1,0],calib[0,0])
            cov[:2,:] = calib[:2,:2].dot(cov[:2,:])
            cov[:,:2] = cov[:,:2].dot(calib[:2,:2].T)
            boxmsmts.append((mean, cov))
        if msmt1 is None and msmt2 is None:
            # make a misc msmt
            # which is only position
            # 9/3/19 only include too-small fragments, not too-large ones
            nottoobig = ((msmt[3]-msmt[2] < car_dim_max_len*2) and
                         (msmt[5]-msmt[4] < car_dim_max_wid*2)) or (
                         (msmt[3]-msmt[2] < car_dim_max_wid*2) and
                         (msmt[5]-msmt[4] < car_dim_max_len*2))
            # 9/15/19 don't include things below a certain size
            nottoosmall = max(msmt[5]-msmt[4],msmt[3]-msmt[2]) > .4
            if nottoobig and nottoosmall:
                mean = calib[:2,:2].dot(uv2xy(msmt[:6])[:2])+calib[:2,2]
                fragmsmts.append(mean)
    # find minimum distances at which cars might be hit
    # at closer distances, lidar is too high
    detectzone = getGroundForLaser(ground, laser_angles[laser2use],
                                   laser_height=1.65-1.2)
    return (detectzone, occlusion_map[:,:3], boxmsmts, fragmsmts)
"rectools.uv2xy",
"numpy.array",
"numpy.arctan2",
"numpy.sin",
"math.hypot",
"numpy.arange",
"numpy.mean",
"numpy.searchsorted",
"numpy.where",
"numpy.delete",
"numpy.diff",
"numpy.maximum",
"rectools.uvBound2xyNormal",
"numpy.argmax",
"numpy.cos",
"math.atan2",
"numpy.sign",
"nump... | [((3772, 3814), 'numpy.arange', 'np.arange', (['(0)', 'halfpi', 'shrinksearch_initres'], {}), '(0, halfpi, shrinksearch_initres)\n', (3781, 3814), True, 'import numpy as np\n'), ((5644, 5670), 'numpy.arange', 'np.arange', (['(-1.35)', '(1.4)', '(0.1)'], {}), '(-1.35, 1.4, 0.1)\n', (5653, 5670), True, 'import numpy as np\n'), ((24337, 24355), 'numpy.arange', 'np.arange', (['(0.0)', '(64)'], {}), '(0.0, 64)\n', (24346, 24355), True, 'import numpy as np\n'), ((24490, 24526), 'numpy.tan', 'np.tan', (['(laser_angles * np.pi / 180.0)'], {}), '(laser_angles * np.pi / 180.0)\n', (24496, 24526), True, 'import numpy as np\n'), ((24634, 24646), 'numpy.zeros', 'np.zeros', (['(64)'], {}), '(64)\n', (24642, 24646), True, 'import numpy as np\n'), ((427, 457), 'numpy.diff', 'np.diff', (['points[:, :2]'], {'axis': '(0)'}), '(points[:, :2], axis=0)\n', (434, 457), True, 'import numpy as np\n'), ((3929, 3963), 'numpy.arctan2', 'np.arctan2', (['pts[-1, 1]', 'pts[-1, 0]'], {}), '(pts[-1, 1], pts[-1, 0])\n', (3939, 3963), True, 'import numpy as np\n'), ((4906, 4934), 'numpy.minimum', 'np.minimum', (['(u - ulo)', '(uhi - u)'], {}), '(u - ulo, uhi - u)\n', (4916, 4934), True, 'import numpy as np\n'), ((4943, 4971), 'numpy.minimum', 'np.minimum', (['(v - vlo)', '(vhi - v)'], {}), '(v - vlo, vhi - v)\n', (4953, 4971), True, 'import numpy as np\n'), ((5792, 5829), 'numpy.zeros', 'np.zeros', (['groundforlaser_angles.shape'], {}), '(groundforlaser_angles.shape)\n', (5800, 5829), True, 'import numpy as np\n'), ((13384, 13407), 'numpy.array', 'np.array', (['occlusion_map'], {}), '(occlusion_map)\n', (13392, 13407), True, 'import numpy as np\n'), ((17072, 17124), 'numpy.maximum', 'np.maximum', (['occlusion_map[:, 1]', 'occlusion_map[:, 2]'], {}), '(occlusion_map[:, 1], occlusion_map[:, 2])\n', (17082, 17124), True, 'import numpy as np\n'), ((17358, 17403), 'numpy.searchsorted', 'np.searchsorted', (['occlusion_map[:, 4]', 'seg_idx'], {}), '(occlusion_map[:, 4], seg_idx)\n', (17373, 
17403), True, 'import numpy as np\n'), ((21868, 21895), 'numpy.array', 'np.array', (['((u, v), (v, -u))'], {}), '(((u, v), (v, -u)))\n', (21876, 21895), True, 'import numpy as np\n'), ((523, 564), 'numpy.where', 'np.where', (['(distsq > series_distance_cutoff)'], {}), '(distsq > series_distance_cutoff)\n', (531, 564), True, 'import numpy as np\n'), ((1732, 1817), 'numpy.delete', 'np.delete', (['points[fixed_cuts[idx]:fixed_cuts[idx + 1]]', 'outlier_idxs[idx]'], {'axis': '(0)'}), '(points[fixed_cuts[idx]:fixed_cuts[idx + 1]], outlier_idxs[idx],\n axis=0)\n', (1741, 1817), True, 'import numpy as np\n'), ((2323, 2334), 'math.hypot', 'hypot', (['u', 'v'], {}), '(u, v)\n', (2328, 2334), False, 'from math import hypot, atan2\n'), ((2409, 2438), 'numpy.argmax', 'np.argmax', (['points_distconcave'], {}), '(points_distconcave)\n', (2418, 2438), True, 'import numpy as np\n'), ((4983, 5007), 'numpy.minimum', 'np.minimum', (['udist', 'vdist'], {}), '(udist, vdist)\n', (4993, 5007), True, 'import numpy as np\n'), ((5112, 5140), 'numpy.minimum', 'np.minimum', (['(u - ulo)', '(v - vlo)'], {}), '(u - ulo, v - vlo)\n', (5122, 5140), True, 'import numpy as np\n'), ((6083, 6096), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (6089, 6096), True, 'import numpy as np\n'), ((6110, 6123), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (6116, 6123), True, 'import numpy as np\n'), ((8122, 8138), 'numpy.zeros', 'np.zeros', (['(0, 5)'], {}), '((0, 5))\n', (8130, 8138), True, 'import numpy as np\n'), ((8664, 8701), 'math.atan2', 'atan2', (['first_point[1]', 'first_point[0]'], {}), '(first_point[1], first_point[0])\n', (8669, 8701), False, 'from math import hypot, atan2\n'), ((8723, 8758), 'math.atan2', 'atan2', (['last_point[1]', 'last_point[0]'], {}), '(last_point[1], last_point[0])\n', (8728, 8758), False, 'from math import hypot, atan2\n'), ((15751, 15774), 'math.hypot', 'hypot', (['centerx', 'centery'], {}), '(centerx, centery)\n', (15756, 15774), False, 'from math import 
hypot, atan2\n'), ((17415, 17448), 'numpy.cos', 'np.cos', (['occlusion_map[map_idx, 0]'], {}), '(occlusion_map[map_idx, 0])\n', (17421, 17448), True, 'import numpy as np\n'), ((17485, 17518), 'numpy.sin', 'np.sin', (['occlusion_map[map_idx, 0]'], {}), '(occlusion_map[map_idx, 0])\n', (17491, 17518), True, 'import numpy as np\n'), ((18955, 19006), 'numpy.searchsorted', 'np.searchsorted', (['occlusion_map[:, 3]', '(seg_idx + 0.1)'], {}), '(occlusion_map[:, 3], seg_idx + 0.1)\n', (18970, 19006), True, 'import numpy as np\n'), ((19017, 19050), 'numpy.cos', 'np.cos', (['occlusion_map[map_idx, 0]'], {}), '(occlusion_map[map_idx, 0])\n', (19023, 19050), True, 'import numpy as np\n'), ((19087, 19120), 'numpy.sin', 'np.sin', (['occlusion_map[map_idx, 0]'], {}), '(occlusion_map[map_idx, 0])\n', (19093, 19120), True, 'import numpy as np\n'), ((22141, 22227), 'rectools.uvBound2xyNormal', 'uvBound2xyNormal', (['minulo', 'maxulo', 'minuhi', 'maxuhi', 'car_dim_min_len', 'car_dim_max_len'], {}), '(minulo, maxulo, minuhi, maxuhi, car_dim_min_len,\n car_dim_max_len)\n', (22157, 22227), False, 'from rectools import uv2xy, fitRecToMold, uvBound2xyNormal\n'), ((22282, 22368), 'rectools.uvBound2xyNormal', 'uvBound2xyNormal', (['minvlo', 'maxvlo', 'minvhi', 'maxvhi', 'car_dim_min_wid', 'car_dim_max_wid'], {}), '(minvlo, maxvlo, minvhi, maxvhi, car_dim_min_wid,\n car_dim_max_wid)\n', (22298, 22368), False, 'from rectools import uv2xy, fitRecToMold, uvBound2xyNormal\n'), ((22555, 22571), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (22563, 22571), True, 'import numpy as np\n'), ((23084, 23170), 'rectools.uvBound2xyNormal', 'uvBound2xyNormal', (['minvlo', 'maxvlo', 'minvhi', 'maxvhi', 'car_dim_min_len', 'car_dim_max_len'], {}), '(minvlo, maxvlo, minvhi, maxvhi, car_dim_min_len,\n car_dim_max_len)\n', (23100, 23170), False, 'from rectools import uv2xy, fitRecToMold, uvBound2xyNormal\n'), ((23225, 23311), 'rectools.uvBound2xyNormal', 'uvBound2xyNormal', (['minulo', 'maxulo', 
'minuhi', 'maxuhi', 'car_dim_min_wid', 'car_dim_max_wid'], {}), '(minulo, maxulo, minuhi, maxuhi, car_dim_min_wid,\n car_dim_max_wid)\n', (23241, 23311), False, 'from rectools import uv2xy, fitRecToMold, uvBound2xyNormal\n'), ((23499, 23515), 'numpy.zeros', 'np.zeros', (['(5, 5)'], {}), '((5, 5))\n', (23507, 23515), True, 'import numpy as np\n'), ((24685, 24711), 'numpy.arange', 'np.arange', (['(32)'], {'dtype': 'float'}), '(32, dtype=float)\n', (24694, 24711), True, 'import numpy as np\n'), ((24749, 24775), 'numpy.arange', 'np.arange', (['(32)'], {'dtype': 'float'}), '(32, dtype=float)\n', (24758, 24775), True, 'import numpy as np\n'), ((26871, 26920), 'numpy.append', 'np.append', (['pts[swap_idx:]', 'pts[:swap_idx]'], {'axis': '(0)'}), '(pts[swap_idx:], pts[:swap_idx], axis=0)\n', (26880, 26920), True, 'import numpy as np\n'), ((27123, 27143), 'numpy.mean', 'np.mean', (['seg'], {'axis': '(0)'}), '(seg, axis=0)\n', (27130, 27143), True, 'import numpy as np\n'), ((27854, 27886), 'numpy.arctan2', 'np.arctan2', (['pts[0, 1]', 'pts[0, 0]'], {}), '(pts[0, 1], pts[0, 0])\n', (27864, 27886), True, 'import numpy as np\n'), ((27928, 27962), 'numpy.arctan2', 'np.arctan2', (['pts[-1, 1]', 'pts[-1, 0]'], {}), '(pts[-1, 1], pts[-1, 0])\n', (27938, 27962), True, 'import numpy as np\n'), ((3147, 3158), 'math.hypot', 'hypot', (['u', 'v'], {}), '(u, v)\n', (3152, 3158), False, 'from math import hypot, atan2\n'), ((8940, 8979), 'math.hypot', 'hypot', (['median_point[0]', 'median_point[1]'], {}), '(median_point[0], median_point[1])\n', (8945, 8979), False, 'from math import hypot, atan2\n'), ((9042, 9081), 'math.atan2', 'atan2', (['median_point[1]', 'median_point[0]'], {}), '(median_point[1], median_point[0])\n', (9047, 9081), False, 'from math import hypot, atan2\n'), ((9285, 9322), 'math.hypot', 'hypot', (['first_point[0]', 'first_point[1]'], {}), '(first_point[0], first_point[1])\n', (9290, 9322), False, 'from math import hypot, atan2\n'), ((9347, 9382), 'math.hypot', 'hypot', 
(['last_point[0]', 'last_point[1]'], {}), '(last_point[0], last_point[1])\n', (9352, 9382), False, 'from math import hypot, atan2\n'), ((12196, 12254), 'numpy.searchsorted', 'np.searchsorted', (['groundforlaser_angles', '(first_angle - 0.05)'], {}), '(groundforlaser_angles, first_angle - 0.05)\n', (12211, 12254), True, 'import numpy as np\n'), ((12280, 12337), 'numpy.searchsorted', 'np.searchsorted', (['groundforlaser_angles', '(last_angle - 0.05)'], {}), '(groundforlaser_angles, last_angle - 0.05)\n', (12295, 12337), True, 'import numpy as np\n'), ((26313, 26328), 'numpy.diff', 'np.diff', (['starts'], {}), '(starts)\n', (26320, 26328), True, 'import numpy as np\n'), ((28654, 28690), 'numpy.arctan2', 'np.arctan2', (['calib[1, 0]', 'calib[0, 0]'], {}), '(calib[1, 0], calib[0, 0])\n', (28664, 28690), True, 'import numpy as np\n'), ((28982, 29018), 'numpy.arctan2', 'np.arctan2', (['calib[1, 0]', 'calib[0, 0]'], {}), '(calib[1, 0], calib[0, 0])\n', (28992, 29018), True, 'import numpy as np\n'), ((4191, 4204), 'numpy.cos', 'np.cos', (['angle'], {}), '(angle)\n', (4197, 4204), True, 'import numpy as np\n'), ((4206, 4219), 'numpy.sin', 'np.sin', (['angle'], {}), '(angle)\n', (4212, 4219), True, 'import numpy as np\n'), ((9616, 9641), 'math.atan2', 'atan2', (['corner_y', 'corner_x'], {}), '(corner_y, corner_x)\n', (9621, 9641), False, 'from math import hypot, atan2\n'), ((22501, 22517), 'numpy.arctan2', 'np.arctan2', (['v', 'u'], {}), '(v, u)\n', (22511, 22517), True, 'import numpy as np\n'), ((23444, 23461), 'numpy.arctan2', 'np.arctan2', (['(-u)', 'v'], {}), '(-u, v)\n', (23454, 23461), True, 'import numpy as np\n'), ((9830, 9855), 'math.hypot', 'hypot', (['corner_y', 'corner_x'], {}), '(corner_y, corner_x)\n', (9835, 9855), False, 'from math import hypot, atan2\n'), ((17160, 17187), 'numpy.cos', 'np.cos', (['occlusion_map[:, 0]'], {}), '(occlusion_map[:, 0])\n', (17166, 17187), True, 'import numpy as np\n'), ((17237, 17264), 'numpy.sin', 'np.sin', (['occlusion_map[:, 
0]'], {}), '(occlusion_map[:, 0])\n', (17243, 17264), True, 'import numpy as np\n'), ((26199, 26218), 'numpy.sign', 'np.sign', (['data[:, 1]'], {}), '(data[:, 1])\n', (26206, 26218), True, 'import numpy as np\n'), ((26725, 26757), 'numpy.arctan2', 'np.arctan2', (['pts[:, 1]', 'pts[:, 0]'], {}), '(pts[:, 1], pts[:, 0])\n', (26735, 26757), True, 'import numpy as np\n'), ((29830, 29845), 'rectools.uv2xy', 'uv2xy', (['msmt[:6]'], {}), '(msmt[:6])\n', (29835, 29845), False, 'from rectools import uv2xy, fitRecToMold, uvBound2xyNormal\n')] |
from .temporal import *
import numpy as np

# Default keyword arguments for the temporal model.  Values mirror the
# original dict(...) call; see .temporal for how each entry is consumed.
defaults = {
    "ydeg": 15,
    "udeg": 2,
    "r": 20.0,
    "dr": None,
    "a": 0.40,
    "b": 0.27,
    "c": 0.1,
    "n": 10.0,
    "p": 1.0,
    "i": 60.0,
    "u": np.zeros(30),
    "tau": None,
    "temporal_kernel": Matern32Kernel,
    "normalized": True,
    "normalization_order": 20,
    "normalization_zmax": 0.023,
    "marginalize_over_inclination": True,
    "baseline_mean": 0.0,
    "baseline_var": 0.0,
    "driver": "numpy",
    "eps": 1e-8,
    "epsy": 1e-12,
    "epsy15": 1e-9,
    "covpts": 300,
    "log_alpha_max": 10,
    "log_beta_max": 10,
    "abmin": 1e-12,
    "sigma_max": 45.0,
    "mx": 300,
    "my": 150,
}
| [
"numpy.zeros"
] | [((187, 199), 'numpy.zeros', 'np.zeros', (['(30)'], {}), '(30)\n', (195, 199), True, 'import numpy as np\n')] |
# -*- coding: utf-8 -*-
"""
Workflow functions for bulding tasks
"""
import numpy
import pandas
import pandas_flavor
def split_three_ways(array):
    """Split a pre-shuffled array into (train, validate, test).

    Cuts at 60% and 80% of the rows, giving a 3:1:1 ratio.  No stratified
    sampling is performed; the caller must shuffle beforehand.
    """
    n_rows = array.shape[0]
    cut_points = [int(.6*n_rows), int(.8*n_rows)]
    train, validate, test = numpy.split(array, cut_points)
    return train, validate, test
return train, validate, test
def split_two_ways(array, percentage=0.2):
    """Split a pre-shuffled array into (train, test).

    Args:
        array: numpy array, split along axis 0; assumed already shuffled.
        percentage: fraction of rows assigned to the test split, in
            (0, 1] (default 0.2 gives an 80/20 split, matching the 60/20/20
            convention of split_three_ways).

    Returns:
        (train, test) tuple of array views.

    Note: the previous implementation passed the raw float straight to
    numpy.split, which interprets its second argument as a section count
    (or index list) and raised for any fractional value -- this also
    caused the uneven-split TODO.  The split index is now computed
    explicitly, truncating like split_three_ways does.
    """
    assert 0 < percentage <= 1
    split_at = int((1 - percentage) * array.shape[0])
    train, test = numpy.split(array, [split_at])
    return train, test
return train, test
@pandas_flavor.register_dataframe_method
def scale(dataframe, array_scaler=None):
    """Apply *array_scaler* (a numpy-array -> numpy-array callable) to the
    dataframe's values and rebuild a DataFrame with the original columns.

    Returns the dataframe unchanged when no scaler is given.  Registered
    as a DataFrame method via pandas_flavor.
    """
    if array_scaler is None:
        return dataframe
    column_names = dataframe.columns.tolist()
    scaled_values = array_scaler(dataframe.values)
    return pandas.DataFrame(scaled_values, columns=column_names)
"pandas.DataFrame",
"numpy.split"
] | [((362, 406), 'numpy.split', 'numpy.split', (['array', '[percent_60, percent_80]'], {}), '(array, [percent_60, percent_80])\n', (373, 406), False, 'import numpy\n'), ((698, 728), 'numpy.split', 'numpy.split', (['array', 'percentage'], {}), '(array, percentage)\n', (709, 728), False, 'import numpy\n'), ((1173, 1225), 'pandas.DataFrame', 'pandas.DataFrame', (['array_scaled'], {'columns': 'list_columns'}), '(array_scaled, columns=list_columns)\n', (1189, 1225), False, 'import pandas\n')] |
import numpy as np
import pytest
import math
from sklearn.base import clone
from sklearn.linear_model import Lasso, ElasticNet
import doubleml as dml
from ._utils import draw_smpls
from ._utils_plr_manual import fit_plr, boot_plr, tune_nuisance_plr
@pytest.fixture(scope='module',
                params=[Lasso(),
                        ElasticNet()])
def learner_g(request):
    # Learner for the nuisance function g; parametrized so every test
    # runs against both Lasso and ElasticNet.
    return request.param
@pytest.fixture(scope='module',
                params=[Lasso(),
                        ElasticNet()])
def learner_m(request):
    # Learner for the nuisance function m; parametrized so every test
    # runs against both Lasso and ElasticNet.
    return request.param
@pytest.fixture(scope='module',
                params=['partialling out'])
def score(request):
    # PLR score function under test (only 'partialling out' here).
    return request.param
@pytest.fixture(scope='module',
                params=['dml2'])
def dml_procedure(request):
    # Cross-fitting procedure variant (only 'dml2' here).
    return request.param
@pytest.fixture(scope='module',
                params=[True, False])
def tune_on_folds(request):
    # Whether hyperparameter tuning runs per fold (True) or once on the
    # full sample (False).
    return request.param
def get_par_grid(learner):
    """Hyperparameter grid for tuning, keyed on the exact learner class."""
    if learner.__class__ == Lasso:
        return {'alpha': np.linspace(0.05, .95, 7)}
    # only two learner families are parametrized in this module
    assert learner.__class__ == ElasticNet
    return {'l1_ratio': [.1, .5, .7, .9, .95, .99, 1],
            'alpha': np.linspace(0.05, 1., 7)}
@pytest.fixture(scope="module")
def dml_plr_fixture(generate_data2, learner_g, learner_m, score, dml_procedure, tune_on_folds):
par_grid = {'ml_g': get_par_grid(learner_g),
'ml_m': get_par_grid(learner_m)}
n_folds_tune = 4
boot_methods = ['normal']
n_folds = 2
n_rep_boot = 502
# collect data
obj_dml_data = generate_data2
# Set machine learning methods for m & g
ml_g = clone(learner_g)
ml_m = clone(learner_m)
np.random.seed(3141)
dml_plr_obj = dml.DoubleMLPLR(obj_dml_data,
ml_g, ml_m,
n_folds,
score=score,
dml_procedure=dml_procedure)
# tune hyperparameters
_ = dml_plr_obj.tune(par_grid, tune_on_folds=tune_on_folds, n_folds_tune=n_folds_tune)
# fit with tuned parameters
dml_plr_obj.fit()
np.random.seed(3141)
y = obj_dml_data.y
x = obj_dml_data.x
d = obj_dml_data.d
n_obs = len(y)
all_smpls = draw_smpls(n_obs, n_folds)
smpls = all_smpls[0]
if tune_on_folds:
g_params, m_params = tune_nuisance_plr(y, x, d,
clone(learner_g), clone(learner_m), smpls, n_folds_tune,
par_grid['ml_g'], par_grid['ml_m'])
else:
xx = [(np.arange(len(y)), np.array([]))]
g_params, m_params = tune_nuisance_plr(y, x, d,
clone(learner_g), clone(learner_m), xx, n_folds_tune,
par_grid['ml_g'], par_grid['ml_m'])
g_params = g_params * n_folds
m_params = m_params * n_folds
res_manual = fit_plr(y, x, d, clone(learner_g), clone(learner_m),
all_smpls, dml_procedure, score,
g_params=g_params, m_params=m_params)
res_dict = {'coef': dml_plr_obj.coef,
'coef_manual': res_manual['theta'],
'se': dml_plr_obj.se,
'se_manual': res_manual['se'],
'boot_methods': boot_methods}
for bootstrap in boot_methods:
np.random.seed(3141)
boot_theta, boot_t_stat = boot_plr(y, d, res_manual['thetas'], res_manual['ses'],
res_manual['all_g_hat'], res_manual['all_m_hat'],
all_smpls, score, bootstrap, n_rep_boot)
np.random.seed(3141)
dml_plr_obj.bootstrap(method=bootstrap, n_rep_boot=n_rep_boot)
res_dict['boot_coef' + bootstrap] = dml_plr_obj.boot_coef
res_dict['boot_t_stat' + bootstrap] = dml_plr_obj.boot_t_stat
res_dict['boot_coef' + bootstrap + '_manual'] = boot_theta
res_dict['boot_t_stat' + bootstrap + '_manual'] = boot_t_stat
return res_dict
@pytest.mark.ci
def test_dml_plr_coef(dml_plr_fixture):
    # Package coefficient must match the manual implementation.
    assert math.isclose(dml_plr_fixture['coef'],
                        dml_plr_fixture['coef_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_se(dml_plr_fixture):
    # Package standard error must match the manual implementation.
    assert math.isclose(dml_plr_fixture['se'],
                        dml_plr_fixture['se_manual'],
                        rel_tol=1e-9, abs_tol=1e-4)
@pytest.mark.ci
def test_dml_plr_boot(dml_plr_fixture):
    # Bootstrap draws are seeded identically in the fixture, so package
    # and manual results must agree elementwise.
    for bootstrap in dml_plr_fixture['boot_methods']:
        assert np.allclose(dml_plr_fixture['boot_coef' + bootstrap],
                           dml_plr_fixture['boot_coef' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
        assert np.allclose(dml_plr_fixture['boot_t_stat' + bootstrap],
                           dml_plr_fixture['boot_t_stat' + bootstrap + '_manual'],
                           rtol=1e-9, atol=1e-4)
| [
"numpy.allclose",
"sklearn.linear_model.ElasticNet",
"math.isclose",
"sklearn.linear_model.Lasso",
"sklearn.base.clone",
"doubleml.DoubleMLPLR",
"numpy.array",
"numpy.linspace",
"numpy.random.seed",
"pytest.fixture"
] | [((566, 624), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': "['partialling out']"}), "(scope='module', params=['partialling out'])\n", (580, 624), False, 'import pytest\n'), ((689, 736), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': "['dml2']"}), "(scope='module', params=['dml2'])\n", (703, 736), False, 'import pytest\n'), ((809, 861), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""', 'params': '[True, False]'}), "(scope='module', params=[True, False])\n", (823, 861), False, 'import pytest\n'), ((1229, 1259), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), "(scope='module')\n", (1243, 1259), False, 'import pytest\n'), ((1654, 1670), 'sklearn.base.clone', 'clone', (['learner_g'], {}), '(learner_g)\n', (1659, 1670), False, 'from sklearn.base import clone\n'), ((1682, 1698), 'sklearn.base.clone', 'clone', (['learner_m'], {}), '(learner_m)\n', (1687, 1698), False, 'from sklearn.base import clone\n'), ((1704, 1724), 'numpy.random.seed', 'np.random.seed', (['(3141)'], {}), '(3141)\n', (1718, 1724), True, 'import numpy as np\n'), ((1743, 1839), 'doubleml.DoubleMLPLR', 'dml.DoubleMLPLR', (['obj_dml_data', 'ml_g', 'ml_m', 'n_folds'], {'score': 'score', 'dml_procedure': 'dml_procedure'}), '(obj_dml_data, ml_g, ml_m, n_folds, score=score,\n dml_procedure=dml_procedure)\n', (1758, 1839), True, 'import doubleml as dml\n'), ((2151, 2171), 'numpy.random.seed', 'np.random.seed', (['(3141)'], {}), '(3141)\n', (2165, 2171), True, 'import numpy as np\n'), ((4183, 4287), 'math.isclose', 'math.isclose', (["dml_plr_fixture['coef']", "dml_plr_fixture['coef_manual']"], {'rel_tol': '(1e-09)', 'abs_tol': '(0.0001)'}), "(dml_plr_fixture['coef'], dml_plr_fixture['coef_manual'],\n rel_tol=1e-09, abs_tol=0.0001)\n", (4195, 4287), False, 'import math\n'), ((4396, 4497), 'math.isclose', 'math.isclose', (["dml_plr_fixture['se']", "dml_plr_fixture['se_manual']"], {'rel_tol': '(1e-09)', 'abs_tol': 
'(0.0001)'}), "(dml_plr_fixture['se'], dml_plr_fixture['se_manual'], rel_tol=\n 1e-09, abs_tol=0.0001)\n", (4408, 4497), False, 'import math\n'), ((3004, 3020), 'sklearn.base.clone', 'clone', (['learner_g'], {}), '(learner_g)\n', (3009, 3020), False, 'from sklearn.base import clone\n'), ((3022, 3038), 'sklearn.base.clone', 'clone', (['learner_m'], {}), '(learner_m)\n', (3027, 3038), False, 'from sklearn.base import clone\n'), ((3431, 3451), 'numpy.random.seed', 'np.random.seed', (['(3141)'], {}), '(3141)\n', (3445, 3451), True, 'import numpy as np\n'), ((3728, 3748), 'numpy.random.seed', 'np.random.seed', (['(3141)'], {}), '(3141)\n', (3742, 3748), True, 'import numpy as np\n'), ((4665, 4802), 'numpy.allclose', 'np.allclose', (["dml_plr_fixture['boot_coef' + bootstrap]", "dml_plr_fixture['boot_coef' + bootstrap + '_manual']"], {'rtol': '(1e-09)', 'atol': '(0.0001)'}), "(dml_plr_fixture['boot_coef' + bootstrap], dml_plr_fixture[\n 'boot_coef' + bootstrap + '_manual'], rtol=1e-09, atol=0.0001)\n", (4676, 4802), True, 'import numpy as np\n'), ((4864, 5005), 'numpy.allclose', 'np.allclose', (["dml_plr_fixture['boot_t_stat' + bootstrap]", "dml_plr_fixture['boot_t_stat' + bootstrap + '_manual']"], {'rtol': '(1e-09)', 'atol': '(0.0001)'}), "(dml_plr_fixture['boot_t_stat' + bootstrap], dml_plr_fixture[\n 'boot_t_stat' + bootstrap + '_manual'], rtol=1e-09, atol=0.0001)\n", (4875, 5005), True, 'import numpy as np\n'), ((311, 318), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (316, 318), False, 'from sklearn.linear_model import Lasso, ElasticNet\n'), ((344, 356), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (354, 356), False, 'from sklearn.linear_model import Lasso, ElasticNet\n'), ((466, 473), 'sklearn.linear_model.Lasso', 'Lasso', ([], {}), '()\n', (471, 473), False, 'from sklearn.linear_model import Lasso, ElasticNet\n'), ((499, 511), 'sklearn.linear_model.ElasticNet', 'ElasticNet', ([], {}), '()\n', (509, 511), False, 'from 
sklearn.linear_model import Lasso, ElasticNet\n'), ((1024, 1050), 'numpy.linspace', 'np.linspace', (['(0.05)', '(0.95)', '(7)'], {}), '(0.05, 0.95, 7)\n', (1035, 1050), True, 'import numpy as np\n'), ((1180, 1205), 'numpy.linspace', 'np.linspace', (['(0.05)', '(1.0)', '(7)'], {}), '(0.05, 1.0, 7)\n', (1191, 1205), True, 'import numpy as np\n'), ((2454, 2470), 'sklearn.base.clone', 'clone', (['learner_g'], {}), '(learner_g)\n', (2459, 2470), False, 'from sklearn.base import clone\n'), ((2472, 2488), 'sklearn.base.clone', 'clone', (['learner_m'], {}), '(learner_m)\n', (2477, 2488), False, 'from sklearn.base import clone\n'), ((2756, 2772), 'sklearn.base.clone', 'clone', (['learner_g'], {}), '(learner_g)\n', (2761, 2772), False, 'from sklearn.base import clone\n'), ((2774, 2790), 'sklearn.base.clone', 'clone', (['learner_m'], {}), '(learner_m)\n', (2779, 2790), False, 'from sklearn.base import clone\n'), ((2638, 2650), 'numpy.array', 'np.array', (['[]'], {}), '([])\n', (2646, 2650), True, 'import numpy as np\n')] |
# Copyright 2020-2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""MaskRcnn tpositive and negative sample screening for Rcnn."""
import numpy as np
import mindspore.nn as nn
import mindspore.common.dtype as mstype
from mindspore.ops import operations as P
from mindspore.common.tensor import Tensor
from mindspore import context
class BboxAssignSampleForRcnn(nn.Cell):
    """
    Bbox assigner and sampler definition for the second (Rcnn) stage.

    Assigns ground-truth boxes to proposals by IoU, screens a fixed number of
    positive and negative samples, encodes the regression targets and builds
    the cropped/resized mask targets for the positive ROIs.

    Args:
        config (dict): Config.
        batch_size (int): Batchsize.
        num_bboxes (int): The anchor nums.
        add_gt_as_proposals (bool): add gt bboxes as proposals flag.

    Returns:
        Tensor, multiple output tensors.

    Examples:
        BboxAssignSampleForRcnn(config, 2, 1024, True)
    """

    def __init__(self, config, batch_size, num_bboxes, add_gt_as_proposals):
        super(BboxAssignSampleForRcnn, self).__init__()
        cfg = config
        # Ascend devices compute in float16; other targets use float32
        if context.get_context("device_target") == "Ascend":
            self.cast_type = mstype.float16
            self.np_cast_type = np.float16
        else:
            self.cast_type = mstype.float32
            self.np_cast_type = np.float32
        self.batch_size = batch_size
        # stage-2 IoU thresholds: below neg_iou_thr -> background,
        # at/above pos_iou_thr -> foreground
        self.neg_iou_thr = cfg.neg_iou_thr_stage2
        self.pos_iou_thr = cfg.pos_iou_thr_stage2
        self.min_pos_iou = cfg.min_pos_iou_stage2
        self.num_gts = cfg.num_gts
        self.num_bboxes = num_bboxes
        # number of positive/negative ROIs sampled per image
        self.num_expected_pos = cfg.num_expected_pos_stage2
        self.num_expected_neg = cfg.num_expected_neg_stage2
        self.num_expected_total = cfg.num_expected_total_stage2
        self.add_gt_as_proposals = add_gt_as_proposals
        # 1-based labels, one per ground-truth box
        self.label_inds = Tensor(np.arange(1, self.num_gts + 1).astype(np.int32))
        self.add_gt_as_proposals_valid = Tensor(np.array(self.add_gt_as_proposals * np.ones(self.num_gts),
                                                         dtype=np.int32))
        self.concat = P.Concat(axis=0)
        self.max_gt = P.ArgMaxWithValue(axis=0)
        self.max_anchor = P.ArgMaxWithValue(axis=1)
        self.sum_inds = P.ReduceSum()
        self.iou = P.IOU()
        self.greaterequal = P.GreaterEqual()
        self.greater = P.Greater()
        self.select = P.Select()
        self.gatherND = P.GatherNd()
        self.squeeze = P.Squeeze()
        self.cast = P.Cast()
        self.logicaland = P.LogicalAnd()
        self.less = P.Less()
        self.random_choice_with_mask_pos = P.RandomChoiceWithMask(self.num_expected_pos)
        self.random_choice_with_mask_neg = P.RandomChoiceWithMask(self.num_expected_neg)
        self.reshape = P.Reshape()
        self.equal = P.Equal()
        self.bounding_box_encode = P.BoundingBoxEncode(means=(0.0, 0.0, 0.0, 0.0), stds=(0.1, 0.1, 0.2, 0.2))
        self.concat_axis1 = P.Concat(axis=1)
        self.logicalnot = P.LogicalNot()
        self.tile = P.Tile()
        # Check tensors used to overwrite invalid gt boxes / anchors
        self.check_gt_one = Tensor(np.array(-1 * np.ones((self.num_gts, 4)), dtype=self.np_cast_type))
        self.check_anchor_two = Tensor(np.array(-2 * np.ones((self.num_bboxes, 4)), dtype=self.np_cast_type))
        # Init tensors for the assignment bookkeeping
        self.assigned_gt_inds = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_gt_zeros = Tensor(np.array(np.zeros(num_bboxes), dtype=np.int32))
        self.assigned_gt_ones = Tensor(np.array(np.ones(num_bboxes), dtype=np.int32))
        self.assigned_gt_ignores = Tensor(np.array(-1 * np.ones(num_bboxes), dtype=np.int32))
        self.assigned_pos_ones = Tensor(np.array(np.ones(self.num_expected_pos), dtype=np.int32))
        self.gt_ignores = Tensor(np.array(-1 * np.ones(self.num_gts), dtype=np.int32))
        self.range_pos_size = Tensor(np.arange(self.num_expected_pos).astype(self.np_cast_type))
        # np.bool_ replaces the np.bool alias deprecated in NumPy 1.20
        # and removed in NumPy 1.24; behavior is identical
        self.check_neg_mask = Tensor(np.array(np.ones(self.num_expected_neg - self.num_expected_pos), dtype=np.bool_))
        self.bboxs_neg_mask = Tensor(np.zeros((self.num_expected_neg, 4), dtype=self.np_cast_type))
        self.labels_neg_mask = Tensor(np.array(np.zeros(self.num_expected_neg), dtype=np.uint8))
        self.reshape_shape_pos = (self.num_expected_pos, 1)
        self.reshape_shape_neg = (self.num_expected_neg, 1)
        self.scalar_zero = Tensor(0.0, dtype=self.cast_type)
        self.scalar_neg_iou_thr = Tensor(self.neg_iou_thr, dtype=self.cast_type)
        self.scalar_pos_iou_thr = Tensor(self.pos_iou_thr, dtype=self.cast_type)
        self.scalar_min_pos_iou = Tensor(self.min_pos_iou, dtype=self.cast_type)
        self.expand_dims = P.ExpandDims()
        self.split = P.Split(axis=1, output_num=4)
        self.concat_last_axis = P.Concat(axis=-1)
        self.round = P.Round()
        # image size repeated as (h, w, h, w) to normalize box coordinates
        self.image_h_w = Tensor([cfg.img_height, cfg.img_width, cfg.img_height, cfg.img_width], dtype=self.cast_type)
        self.range = nn.Range(start=0, limit=cfg.num_expected_pos_stage2)
        self.crop_and_resize = P.CropAndResize(method="bilinear_v2")
        self.mask_shape = (cfg.mask_shape[0], cfg.mask_shape[1])
        self.squeeze_mask_last = P.Squeeze(axis=-1)

    def construct(self, gt_bboxes_i, gt_labels_i, valid_mask, bboxes, gt_valids, gt_masks_i):
        """Assign, sample and build stage-2 box/label/mask targets for one image."""
        # replace invalid gt boxes / anchors with sentinel boxes so they never match
        gt_bboxes_i = self.select(self.cast(self.tile(self.reshape(self.cast(gt_valids, mstype.int32), \
                                  (self.num_gts, 1)), (1, 4)), mstype.bool_), \
                                  gt_bboxes_i, self.check_gt_one)
        bboxes = self.select(self.cast(self.tile(self.reshape(self.cast(valid_mask, mstype.int32), \
                             (self.num_bboxes, 1)), (1, 4)), mstype.bool_), \
                             bboxes, self.check_anchor_two)
        # IoU overlaps between every proposal and every ground-truth box
        overlaps = self.iou(bboxes, gt_bboxes_i)
        max_overlaps_w_gt_index, max_overlaps_w_gt = self.max_gt(overlaps)
        _, max_overlaps_w_ac = self.max_anchor(overlaps)
        # negatives: 0 <= best IoU < neg_iou_thr
        neg_sample_iou_mask = self.logicaland(self.greaterequal(max_overlaps_w_gt,
                                                                self.scalar_zero),
                                              self.less(max_overlaps_w_gt,
                                                        self.scalar_neg_iou_thr))
        assigned_gt_inds2 = self.select(neg_sample_iou_mask, self.assigned_gt_zeros, self.assigned_gt_inds)
        # positives: best IoU >= pos_iou_thr; store 1-based gt index
        pos_sample_iou_mask = self.greaterequal(max_overlaps_w_gt, self.scalar_pos_iou_thr)
        assigned_gt_inds3 = self.select(pos_sample_iou_mask, \
                                        max_overlaps_w_gt_index + self.assigned_gt_ones, assigned_gt_inds2)
        # low-quality matching: each gt claims its best proposal when IoU >= min_pos_iou
        for j in range(self.num_gts):
            max_overlaps_w_ac_j = max_overlaps_w_ac[j:j+1:1]
            overlaps_w_ac_j = overlaps[j:j+1:1, ::]
            temp1 = self.greaterequal(max_overlaps_w_ac_j, self.scalar_min_pos_iou)
            temp2 = self.squeeze(self.equal(overlaps_w_ac_j, max_overlaps_w_ac_j))
            pos_mask_j = self.logicaland(temp1, temp2)
            assigned_gt_inds3 = self.select(pos_mask_j, (j+1)*self.assigned_gt_ones, assigned_gt_inds3)
        assigned_gt_inds5 = self.select(valid_mask, assigned_gt_inds3, self.assigned_gt_ignores)
        # optionally add gt boxes themselves as proposals (prepended)
        bboxes = self.concat((gt_bboxes_i, bboxes))
        label_inds_valid = self.select(gt_valids, self.label_inds, self.gt_ignores)
        label_inds_valid = label_inds_valid * self.add_gt_as_proposals_valid
        assigned_gt_inds5 = self.concat((label_inds_valid, assigned_gt_inds5))
        # Get pos index
        pos_index, valid_pos_index = self.random_choice_with_mask_pos(self.greater(assigned_gt_inds5, 0))
        pos_check_valid = self.cast(self.greater(assigned_gt_inds5, 0), self.cast_type)
        pos_check_valid = self.sum_inds(pos_check_valid, -1)
        valid_pos_index = self.less(self.range_pos_size, pos_check_valid)
        pos_index = pos_index * self.reshape(self.cast(valid_pos_index, mstype.int32), (self.num_expected_pos, 1))
        num_pos = self.sum_inds(self.cast(self.logicalnot(valid_pos_index), self.cast_type), -1)
        valid_pos_index = self.cast(valid_pos_index, mstype.int32)
        pos_index = self.reshape(pos_index, self.reshape_shape_pos)
        valid_pos_index = self.reshape(valid_pos_index, self.reshape_shape_pos)
        pos_index = pos_index * valid_pos_index
        # back to 0-based gt index for the gathers below
        pos_assigned_gt_index = self.gatherND(assigned_gt_inds5, pos_index) - self.assigned_pos_ones
        pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, self.reshape_shape_pos)
        pos_assigned_gt_index = pos_assigned_gt_index * valid_pos_index
        pos_gt_labels = self.gatherND(gt_labels_i, pos_assigned_gt_index)
        # Get neg index
        neg_index, valid_neg_index = self.random_choice_with_mask_neg(self.equal(assigned_gt_inds5, 0))
        unvalid_pos_index = self.less(self.range_pos_size, num_pos)
        valid_neg_index = self.logicaland(self.concat((self.check_neg_mask, unvalid_pos_index)), valid_neg_index)
        neg_index = self.reshape(neg_index, self.reshape_shape_neg)
        valid_neg_index = self.cast(valid_neg_index, mstype.int32)
        valid_neg_index = self.reshape(valid_neg_index, self.reshape_shape_neg)
        neg_index = neg_index * valid_neg_index
        pos_bboxes_ = self.gatherND(bboxes, pos_index)
        neg_bboxes_ = self.gatherND(bboxes, neg_index)
        pos_assigned_gt_index = self.reshape(pos_assigned_gt_index, self.reshape_shape_pos)
        pos_gt_bboxes_ = self.gatherND(gt_bboxes_i, pos_assigned_gt_index)
        # encode box regression targets (deltas) for the positives
        pos_bbox_targets_ = self.bounding_box_encode(pos_bboxes_, pos_gt_bboxes_)
        # assign positive ROIs to gt masks
        # Pick the right front and background mask for each ROI
        roi_pos_masks_fb = self.gatherND(gt_masks_i, pos_assigned_gt_index)
        pos_masks_fb = self.cast(roi_pos_masks_fb, mstype.float32)
        # compute mask targets
        x1, y1, x2, y2 = self.split(pos_bboxes_)
        boxes = self.concat_last_axis((y1, x1, y2, x2))
        # normalized box coordinate
        boxes = boxes / self.image_h_w
        box_ids = self.range()
        pos_masks_fb = self.expand_dims(pos_masks_fb, -1)
        boxes = self.cast(boxes, mstype.float32)
        pos_masks_fb = self.crop_and_resize(pos_masks_fb, boxes, box_ids, self.mask_shape)
        # Remove the extra dimension from masks.
        pos_masks_fb = self.squeeze_mask_last(pos_masks_fb)
        # convert gt masks targets be 0 or 1 to use with binary cross entropy loss.
        pos_masks_fb = self.round(pos_masks_fb)
        pos_masks_fb = self.cast(pos_masks_fb, self.cast_type)
        # stack positives first, negatives after; negatives use zero deltas
        # and background (0) labels
        total_bboxes = self.concat((pos_bboxes_, neg_bboxes_))
        total_deltas = self.concat((pos_bbox_targets_, self.bboxs_neg_mask))
        total_labels = self.concat((pos_gt_labels, self.labels_neg_mask))
        valid_pos_index = self.reshape(valid_pos_index, self.reshape_shape_pos)
        valid_neg_index = self.reshape(valid_neg_index, self.reshape_shape_neg)
        total_mask = self.concat((valid_pos_index, valid_neg_index))
        return total_bboxes, total_deltas, total_labels, total_mask, pos_bboxes_, pos_masks_fb, \
               pos_gt_labels, valid_pos_index
| [
"mindspore.ops.operations.Squeeze",
"mindspore.ops.operations.Round",
"mindspore.ops.operations.RandomChoiceWithMask",
"mindspore.ops.operations.Concat",
"mindspore.ops.operations.ReduceSum",
"mindspore.ops.operations.GatherNd",
"mindspore.ops.operations.Reshape",
"mindspore.ops.operations.GreaterEqua... | [((2544, 2560), 'mindspore.ops.operations.Concat', 'P.Concat', ([], {'axis': '(0)'}), '(axis=0)\n', (2552, 2560), True, 'from mindspore.ops import operations as P\n'), ((2583, 2608), 'mindspore.ops.operations.ArgMaxWithValue', 'P.ArgMaxWithValue', ([], {'axis': '(0)'}), '(axis=0)\n', (2600, 2608), True, 'from mindspore.ops import operations as P\n'), ((2635, 2660), 'mindspore.ops.operations.ArgMaxWithValue', 'P.ArgMaxWithValue', ([], {'axis': '(1)'}), '(axis=1)\n', (2652, 2660), True, 'from mindspore.ops import operations as P\n'), ((2685, 2698), 'mindspore.ops.operations.ReduceSum', 'P.ReduceSum', ([], {}), '()\n', (2696, 2698), True, 'from mindspore.ops import operations as P\n'), ((2718, 2725), 'mindspore.ops.operations.IOU', 'P.IOU', ([], {}), '()\n', (2723, 2725), True, 'from mindspore.ops import operations as P\n'), ((2754, 2770), 'mindspore.ops.operations.GreaterEqual', 'P.GreaterEqual', ([], {}), '()\n', (2768, 2770), True, 'from mindspore.ops import operations as P\n'), ((2794, 2805), 'mindspore.ops.operations.Greater', 'P.Greater', ([], {}), '()\n', (2803, 2805), True, 'from mindspore.ops import operations as P\n'), ((2828, 2838), 'mindspore.ops.operations.Select', 'P.Select', ([], {}), '()\n', (2836, 2838), True, 'from mindspore.ops import operations as P\n'), ((2863, 2875), 'mindspore.ops.operations.GatherNd', 'P.GatherNd', ([], {}), '()\n', (2873, 2875), True, 'from mindspore.ops import operations as P\n'), ((2899, 2910), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', ([], {}), '()\n', (2908, 2910), True, 'from mindspore.ops import operations as P\n'), ((2931, 2939), 'mindspore.ops.operations.Cast', 'P.Cast', ([], {}), '()\n', (2937, 2939), True, 'from mindspore.ops import operations as P\n'), ((2966, 2980), 'mindspore.ops.operations.LogicalAnd', 'P.LogicalAnd', ([], {}), '()\n', (2978, 2980), True, 'from mindspore.ops import operations as P\n'), ((3001, 3009), 'mindspore.ops.operations.Less', 'P.Less', ([], 
{}), '()\n', (3007, 3009), True, 'from mindspore.ops import operations as P\n'), ((3053, 3098), 'mindspore.ops.operations.RandomChoiceWithMask', 'P.RandomChoiceWithMask', (['self.num_expected_pos'], {}), '(self.num_expected_pos)\n', (3075, 3098), True, 'from mindspore.ops import operations as P\n'), ((3142, 3187), 'mindspore.ops.operations.RandomChoiceWithMask', 'P.RandomChoiceWithMask', (['self.num_expected_neg'], {}), '(self.num_expected_neg)\n', (3164, 3187), True, 'from mindspore.ops import operations as P\n'), ((3211, 3222), 'mindspore.ops.operations.Reshape', 'P.Reshape', ([], {}), '()\n', (3220, 3222), True, 'from mindspore.ops import operations as P\n'), ((3244, 3253), 'mindspore.ops.operations.Equal', 'P.Equal', ([], {}), '()\n', (3251, 3253), True, 'from mindspore.ops import operations as P\n'), ((3289, 3363), 'mindspore.ops.operations.BoundingBoxEncode', 'P.BoundingBoxEncode', ([], {'means': '(0.0, 0.0, 0.0, 0.0)', 'stds': '(0.1, 0.1, 0.2, 0.2)'}), '(means=(0.0, 0.0, 0.0, 0.0), stds=(0.1, 0.1, 0.2, 0.2))\n', (3308, 3363), True, 'from mindspore.ops import operations as P\n'), ((3392, 3408), 'mindspore.ops.operations.Concat', 'P.Concat', ([], {'axis': '(1)'}), '(axis=1)\n', (3400, 3408), True, 'from mindspore.ops import operations as P\n'), ((3435, 3449), 'mindspore.ops.operations.LogicalNot', 'P.LogicalNot', ([], {}), '()\n', (3447, 3449), True, 'from mindspore.ops import operations as P\n'), ((3470, 3478), 'mindspore.ops.operations.Tile', 'P.Tile', ([], {}), '()\n', (3476, 3478), True, 'from mindspore.ops import operations as P\n'), ((4838, 4871), 'mindspore.common.tensor.Tensor', 'Tensor', (['(0.0)'], {'dtype': 'self.cast_type'}), '(0.0, dtype=self.cast_type)\n', (4844, 4871), False, 'from mindspore.common.tensor import Tensor\n'), ((4906, 4952), 'mindspore.common.tensor.Tensor', 'Tensor', (['self.neg_iou_thr'], {'dtype': 'self.cast_type'}), '(self.neg_iou_thr, dtype=self.cast_type)\n', (4912, 4952), False, 'from mindspore.common.tensor import 
Tensor\n'), ((4987, 5033), 'mindspore.common.tensor.Tensor', 'Tensor', (['self.pos_iou_thr'], {'dtype': 'self.cast_type'}), '(self.pos_iou_thr, dtype=self.cast_type)\n', (4993, 5033), False, 'from mindspore.common.tensor import Tensor\n'), ((5068, 5114), 'mindspore.common.tensor.Tensor', 'Tensor', (['self.min_pos_iou'], {'dtype': 'self.cast_type'}), '(self.min_pos_iou, dtype=self.cast_type)\n', (5074, 5114), False, 'from mindspore.common.tensor import Tensor\n'), ((5143, 5157), 'mindspore.ops.operations.ExpandDims', 'P.ExpandDims', ([], {}), '()\n', (5155, 5157), True, 'from mindspore.ops import operations as P\n'), ((5179, 5208), 'mindspore.ops.operations.Split', 'P.Split', ([], {'axis': '(1)', 'output_num': '(4)'}), '(axis=1, output_num=4)\n', (5186, 5208), True, 'from mindspore.ops import operations as P\n'), ((5241, 5258), 'mindspore.ops.operations.Concat', 'P.Concat', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5249, 5258), True, 'from mindspore.ops import operations as P\n'), ((5280, 5289), 'mindspore.ops.operations.Round', 'P.Round', ([], {}), '()\n', (5287, 5289), True, 'from mindspore.ops import operations as P\n'), ((5315, 5411), 'mindspore.common.tensor.Tensor', 'Tensor', (['[cfg.img_height, cfg.img_width, cfg.img_height, cfg.img_width]'], {'dtype': 'self.cast_type'}), '([cfg.img_height, cfg.img_width, cfg.img_height, cfg.img_width],\n dtype=self.cast_type)\n', (5321, 5411), False, 'from mindspore.common.tensor import Tensor\n'), ((5429, 5481), 'mindspore.nn.Range', 'nn.Range', ([], {'start': '(0)', 'limit': 'cfg.num_expected_pos_stage2'}), '(start=0, limit=cfg.num_expected_pos_stage2)\n', (5437, 5481), True, 'import mindspore.nn as nn\n'), ((5513, 5550), 'mindspore.ops.operations.CropAndResize', 'P.CropAndResize', ([], {'method': '"""bilinear_v2"""'}), "(method='bilinear_v2')\n", (5528, 5550), True, 'from mindspore.ops import operations as P\n'), ((5649, 5667), 'mindspore.ops.operations.Squeeze', 'P.Squeeze', ([], {'axis': '(-1)'}), '(axis=-1)\n', (5658, 
5667), True, 'from mindspore.ops import operations as P\n'), ((1520, 1556), 'mindspore.context.get_context', 'context.get_context', (['"""device_target"""'], {}), "('device_target')\n", (1539, 1556), False, 'from mindspore import context\n'), ((4529, 4590), 'numpy.zeros', 'np.zeros', (['(self.num_expected_neg, 4)'], {'dtype': 'self.np_cast_type'}), '((self.num_expected_neg, 4), dtype=self.np_cast_type)\n', (4537, 4590), True, 'import numpy as np\n'), ((3872, 3892), 'numpy.zeros', 'np.zeros', (['num_bboxes'], {}), '(num_bboxes)\n', (3880, 3892), True, 'import numpy as np\n'), ((3959, 3978), 'numpy.ones', 'np.ones', (['num_bboxes'], {}), '(num_bboxes)\n', (3966, 3978), True, 'import numpy as np\n'), ((4140, 4170), 'numpy.ones', 'np.ones', (['self.num_expected_pos'], {}), '(self.num_expected_pos)\n', (4147, 4170), True, 'import numpy as np\n'), ((4420, 4474), 'numpy.ones', 'np.ones', (['(self.num_expected_neg - self.num_expected_pos)'], {}), '(self.num_expected_neg - self.num_expected_pos)\n', (4427, 4474), True, 'import numpy as np\n'), ((4639, 4670), 'numpy.zeros', 'np.zeros', (['self.num_expected_neg'], {}), '(self.num_expected_neg)\n', (4647, 4670), True, 'import numpy as np\n'), ((2291, 2321), 'numpy.arange', 'np.arange', (['(1)', '(self.num_gts + 1)'], {}), '(1, self.num_gts + 1)\n', (2300, 2321), True, 'import numpy as np\n'), ((2424, 2445), 'numpy.ones', 'np.ones', (['self.num_gts'], {}), '(self.num_gts)\n', (2431, 2445), True, 'import numpy as np\n'), ((3545, 3571), 'numpy.ones', 'np.ones', (['(self.num_gts, 4)'], {}), '((self.num_gts, 4))\n', (3552, 3571), True, 'import numpy as np\n'), ((3652, 3681), 'numpy.ones', 'np.ones', (['(self.num_bboxes, 4)'], {}), '((self.num_bboxes, 4))\n', (3659, 3681), True, 'import numpy as np\n'), ((3785, 3804), 'numpy.ones', 'np.ones', (['num_bboxes'], {}), '(num_bboxes)\n', (3792, 3804), True, 'import numpy as np\n'), ((4053, 4072), 'numpy.ones', 'np.ones', (['num_bboxes'], {}), '(num_bboxes)\n', (4060, 4072), True, 'import 
numpy as np\n'), ((4237, 4258), 'numpy.ones', 'np.ones', (['self.num_gts'], {}), '(self.num_gts)\n', (4244, 4258), True, 'import numpy as np\n'), ((4314, 4346), 'numpy.arange', 'np.arange', (['self.num_expected_pos'], {}), '(self.num_expected_pos)\n', (4323, 4346), True, 'import numpy as np\n')] |
#!/usr/bin/env python
u"""
MPI_ICESat2_ATL03.py (05/2021)
Read ICESat-2 ATL03 and ATL09 data files to calculate average segment surfaces
ATL03 datasets: Global Geolocated Photons
ATL09 datasets: Atmospheric Characteristics
CALLING SEQUENCE:
mpiexec -np 6 python MPI_ICESat2_ATL03.py ATL03_file ATL09_file
COMMAND LINE OPTIONS:
-O X, --output X: Name and path of output file
-V, --verbose: Verbose output to track progress
-M X, --mode X: Permission mode of files created
REQUIRES MPI PROGRAM
MPI: standardized and portable message-passing system
https://www.open-mpi.org/
http://mpitutorial.com/
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
https://numpy.org
https://numpy.org/doc/stable/user/numpy-for-matlab-users.html
scipy: Scientific Tools for Python
https://docs.scipy.org/doc/
mpi4py: MPI for Python
http://pythonhosted.org/mpi4py/
http://mpi4py.readthedocs.org/en/stable/
h5py: Python interface for Hierarchal Data Format 5 (HDF5)
https://h5py.org
http://docs.h5py.org/en/stable/mpi.html
scikit-learn: Machine Learning in Python
http://scikit-learn.org/stable/index.html
https://github.com/scikit-learn/scikit-learn
PROGRAM DEPENDENCIES:
convert_delta_time.py: converts from delta time into Julian and year-decimal
fit.py: Utilities for calculating fits from ATL03 Geolocated Photon Data
time.py: Utilities for calculating time operations
utilities.py: download and management utilities for syncing files
classify_photons.py: Yet Another Photon Classifier for Geolocated Photon Data
UPDATE HISTORY:
Updated 05/2021: add photon classifier based on GSFC YAPC algorithms
move surface fit operations into separate module
Updated 02/2021: replaced numpy bool/int to prevent deprecation warnings
Updated 01/2021: time utilities for converting times from JD and to decimal
Updated 12/2020: H5py deprecation warning change to use make_scale
Updated 10/2020: using argparse to set parameters
Updated 09/2020: using reference photon delta time to interpolate ATL09
Updated 08/2020: using convert delta time function to convert to Julian days
Updated 07/2020: "re-tiding" is no longer unnecessary
Updated 06/2020: verify that complementary beam pair is in list of beams
set masks of output arrays after reading from HDF5
add additional beam check within heights groups
Updated 10/2019: changing Y/N flags to True/False
Updated 09/2019: adding segment quality summary variable
Updated 04/2019: updated backup algorithm for when the surface fit fails
estimate both mean and median first photon bias corrections
estimate both mean and median transmit pulse shape corrections
Updated 03/2019: extract a set of ATL09 parameters for each ATL03 segment_ID
Updated 02/2019: procedures following ATBD for first ATL03 release
Written 05/2017
"""
from __future__ import print_function, division
import sys
import os
import re
import h5py
import argparse
import datetime
import numpy as np
import scipy.signal
import scipy.interpolate
import sklearn.neighbors
import sklearn.cluster
from mpi4py import MPI
import icesat2_toolkit.fit
import icesat2_toolkit.time
from icesat2_toolkit.convert_delta_time import convert_delta_time
from yapc.classify_photons import classify_photons
#-- PURPOSE: keep track of MPI threads
def info(rank, size):
    """Print the MPI rank, module name and process identifiers"""
    print(f'Rank {rank+1:d} of {size:d}')
    print(f'module name: {__name__}')
    #-- not every platform exposes a parent-process id
    if hasattr(os, 'getppid'):
        print(f'parent process: {os.getppid():d}')
    print(f'process id: {os.getpid():d}')
#-- PURPOSE: reads ICESat-2 ATL03 and ATL09 HDF5 files
#-- and computes average heights over segments
def main():
#-- start MPI communicator
comm = MPI.COMM_WORLD
#-- Read the system arguments listed after the program
parser = argparse.ArgumentParser(
description="""Read ICESat-2 ATL03 and ATL09 data files to calculate
average segment surfaces
"""
)
#-- command line parameters
#-- first file listed contains the ATL03 file
#-- second file listed is the associated ATL09 file
parser.add_argument('ATL03',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='?',
help='ICESat-2 ATL03 file to run')
parser.add_argument('ATL09',
type=lambda p: os.path.abspath(os.path.expanduser(p)), nargs='?',
help='ICESat-2 ATL09 file to run')
#-- use default output file name
parser.add_argument('--output','-O',
type=lambda p: os.path.abspath(os.path.expanduser(p)),
help='Name and path of output file')
#-- verbosity settings
#-- verbose will output information about each output file
parser.add_argument('--verbose','-V',
default=False, action='store_true',
help='Verbose output of run')
#-- permissions mode of the local files (number in octal)
parser.add_argument('--mode','-M',
type=lambda x: int(x,base=8), default=0o775,
help='permissions mode of output files')
args = parser.parse_args()
#-- output module information for process
if args.verbose:
info(comm.rank,comm.size)
if args.verbose and (comm.rank==0):
print('{0} -->'.format(args.ATL03))
#-- directory setup
ATL03_dir = os.path.dirname(args.ATL03)
#-- compile regular expression operator for extracting data from ATL03 files
rx1 = re.compile(r'(processed)?(ATL\d+)_(\d{4})(\d{2})(\d{2})(\d{2})(\d{2})'
r'(\d{2})_(\d{4})(\d{2})(\d{2})_(\d{3})_(\d{2})(.*?).h5$')
#-- universal variables
#-- speed of light
c = 299792458.0
#-- associated beam pairs
associated_beam_pair = dict(gt1l='gt1r',gt1r='gt1l',gt2l='gt2r',gt2r='gt2l',
gt3l='gt3r',gt3r='gt3l')
#-- read ICESat-2 ATL03 HDF5 files (extract base parameters)
SUB,PRD,YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX=rx1.findall(args.ATL03).pop()
#-- Open the HDF5 file for reading
fileID = h5py.File(args.ATL03, 'r', driver='mpio', comm=comm)
#-- read each input beam within the file
IS2_atl03_beams = []
for gtx in [k for k in fileID.keys() if bool(re.match(r'gt\d[lr]',k))]:
#-- check if subsetted beam contains data
#-- check in both the geolocation and heights groups
try:
fileID[gtx]['geolocation']['segment_id']
fileID[gtx]['heights']['delta_time']
except KeyError:
pass
else:
IS2_atl03_beams.append(gtx)
#-- number of GPS seconds between the GPS epoch
#-- and ATLAS Standard Data Product (SDP) epoch
atlas_sdp_gps_epoch = fileID['ancillary_data']['atlas_sdp_gps_epoch'][:]
#-- which TEP to use for a given spot (convert to 0-based index)
tep_valid_spot = fileID['ancillary_data']['tep']['tep_valid_spot'][:] - 1
tep_pce = ['pce1_spot1','pce2_spot3']
#-- valid range of times for each TEP histogram
tep_range_prim = fileID['ancillary_data']['tep']['tep_range_prim'][:]
#-- save tep parameters for a given beam
tep = {}
#-- variables of interest for generating corrected elevation estimates
Segment_ID = {}
Segment_Index_begin = {}
Segment_PE_count = {}
Segment_Distance = {}
Segment_Length = {}
Segment_Background = {}
#-- fit parameters
Segment_delta_time = {}
Segment_Height = {}
Segment_Land_Ice = {}
Segment_dH_along = {}
Segment_dH_across = {}
Segment_Height_Error = {}
Segment_Land_Ice_Error = {}
Segment_dH_along_Error = {}
Segment_dH_across_Error = {}
Segment_Mean_Median = {}
Segment_X_atc = {}
Segment_X_spread = {}
Segment_Y_atc = {}
Segment_sigma_geo = {}
Segment_Longitude = {}
Segment_Latitude = {}
Segment_N_Fit = {}
Segment_Window = {}
Segment_RDE = {}
Segment_SNR = {}
Segment_Photon_SNR = {}
Segment_Summary = {}
Segment_Iterations = {}
Segment_Clusters = {}
Segment_Source = {}
Segment_Pulses = {}
#-- correction parameters
FPB_mean_corr = {}
FPB_mean_sigma = {}
FPB_median_corr = {}
FPB_median_sigma = {}
mean_dead_time = {}
FPB_n_corr = {}
FPB_cal_corr = {}
TPS_mean_corr = {}
TPS_median_corr = {}
#-- for each input beam within the file
for gtx in sorted(IS2_atl03_beams):
print(gtx) if args.verbose and (comm.rank == 0) else None
#-- beam type (weak versus strong) for time
atlas_beam_type = fileID[gtx].attrs['atlas_beam_type'].decode('utf-8')
n_pixels = 16.0 if (atlas_beam_type == "strong") else 4.0
#-- ATL03 Segment ID
Segment_ID[gtx] = fileID[gtx]['geolocation']['segment_id'][:]
#-- number of valid overlapping ATL03 segments
n_seg = len(Segment_ID[gtx]) - 1
#-- number of photon events
n_pe, = fileID[gtx]['heights']['delta_time'].shape
#-- first photon in the segment (convert to 0-based indexing)
Segment_Index_begin[gtx] = fileID[gtx]['geolocation']['ph_index_beg'][:] - 1
#-- number of photon events in the segment
Segment_PE_count[gtx] = fileID[gtx]['geolocation']['segment_ph_cnt'][:]
#-- along-track distance for each ATL03 segment
Segment_Distance[gtx] = fileID[gtx]['geolocation']['segment_dist_x'][:]
#-- along-track length for each ATL03 segment
Segment_Length[gtx] = fileID[gtx]['geolocation']['segment_length'][:]
#-- ocean tide
fv = fileID[gtx]['geophys_corr']['tide_ocean'].attrs['_FillValue']
tide_ocean = np.ma.array(fileID[gtx]['geophys_corr']['tide_ocean'][:],
fill_value=fv)
tide_ocean.mask = tide_ocean.data == tide_ocean.fill_value
#-- interpolate background photon rate based on 50-shot summation
background_delta_time = fileID[gtx]['bckgrd_atlas']['delta_time'][:]
SPL = scipy.interpolate.UnivariateSpline(background_delta_time,
fileID[gtx]['bckgrd_atlas']['bckgrd_rate'][:],k=3,s=0)
Segment_Background[gtx] = SPL(fileID[gtx]['geolocation']['delta_time'][:])
#-- ATLAS spot number for beam in current orientation
spot = int(fileID[gtx].attrs['atlas_spot_number'])
#-- get ATLAS impulse response variables for the transmitter echo path (TEP)
tep1,tep2 = ('atlas_impulse_response','tep_histogram')
#-- get appropriate transmitter-echo-path histogram for spot
associated_pce = tep_valid_spot[spot-1]
pce = tep_pce[associated_pce]
#-- delta time of TEP histogram
tep_tod, = fileID[tep1][pce][tep2]['tep_tod'][:]
#-- truncate tep to primary histogram (reflection 43-50 ns)
#-- and extract signal tep from noise tep. calculate width of tep
#-- ATL03 recommends subsetting between 15-30 ns to avoid secondary
tep_hist_time = np.copy(fileID[tep1][pce][tep2]['tep_hist_time'][:])
tep_hist = np.copy(fileID[tep1][pce][tep2]['tep_hist'][:])
t_TX,p_TX,W_TX,FWHM,TXs,TXe = icesat2_toolkit.fit.extract_tep_histogram(
tep_hist_time, tep_hist, tep_range_prim)
#-- save tep information and statistics
tep[gtx] = {}
tep[gtx]['pce'] = pce
tep[gtx]['tep_tod'] = tep_tod
tep[gtx]['tx_start'] = TXs
tep[gtx]['tx_end'] = TXe
tep[gtx]['tx_robust_sprd'] = W_TX
tep[gtx]['sigma_tx'] = FWHM
#-- channel dead time and first photon bias table for beam
cal1,cal2 = ('ancillary_data','calibrations')
channel_dead_time = fileID[cal1][cal2]['dead_time'][gtx]['dead_time'][:]
mean_dead_time[gtx] = np.mean(channel_dead_time)
fpb_dead_time = fileID[cal1][cal2]['first_photon_bias'][gtx]['dead_time'][:]
fpb_strength = fileID[cal1][cal2]['first_photon_bias'][gtx]['strength'][:]
fpb_width = fileID[cal1][cal2]['first_photon_bias'][gtx]['width'][:]
fpb_corr = fileID[cal1][cal2]['first_photon_bias'][gtx]['ffb_corr'][:]
#-- calculate first photon bias as a function of strength and width
#-- for the calculated mean dead time of the beam
ndt,ns,nw = np.shape(fpb_corr)
fpb_corr_dead_time = np.zeros((ns,nw))
for s in range(ns):
for w in range(nw):
SPL = scipy.interpolate.UnivariateSpline(fpb_dead_time/1e9,
fpb_corr[:,s,w],k=3,s=0)
fpb_corr_dead_time[s,w] = SPL(mean_dead_time[gtx])
#-- bivariate spline for estimating first-photon bias using CAL-19
CAL19 = scipy.interpolate.RectBivariateSpline(fpb_strength[0,:],
fpb_width[0,:]/1e9, fpb_corr_dead_time/1e12, kx=1, ky=1)
#-- allocate for output segment fit data
fill_value = fileID[gtx]['geolocation']['sigma_h'].attrs['_FillValue']
#-- delta time of fit photons
Distributed_delta_time = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_delta_time.mask = np.ones((n_seg),dtype=bool)
#-- segment fit heights
Distributed_Height = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Height.mask = np.ones((n_seg),dtype=bool)
#-- land ice height corrected for first photon bias and transmit-pulse shape
Distributed_Land_Ice = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Land_Ice.mask = np.ones((n_seg),dtype=bool)
#-- segment fit along-track slopes
Distributed_dH_along = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_dH_along.mask = np.ones((n_seg),dtype=bool)
#-- segment fit height errors
Distributed_Height_Error = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Height_Error.mask = np.ones((n_seg),dtype=bool)
#-- land ice height errors (max of fit or first photon bias uncertainties)
Distributed_Land_Ice_Error = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Land_Ice_Error.mask = np.ones((n_seg),dtype=bool)
#-- segment fit along-track slope errors
Distributed_dH_along_Error = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_dH_along_Error.mask = np.ones((n_seg),dtype=bool)
#-- difference between the mean and median of the residuals from fit height
Distributed_Mean_Median = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Mean_Median.mask = np.ones((n_seg),dtype=bool)
#-- along-track X coordinates of segment fit
Distributed_X_atc = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_X_atc.mask = np.ones((n_seg),dtype=bool)
#-- along-track X coordinate spread of points used in segment fit
Distributed_X_spread = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_X_spread.mask = np.ones((n_seg),dtype=bool)
#-- along-track Y coordinates of segment fit
Distributed_Y_atc = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Y_atc.mask = np.ones((n_seg),dtype=bool)
#-- longitude of fit photons
Distributed_Longitude = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Longitude.mask = np.ones((n_seg),dtype=bool)
#-- latitude of fit photons
Distributed_Latitude = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Latitude.mask = np.ones((n_seg),dtype=bool)
#-- number of photons in fit
Distributed_N_Fit = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_N_Fit.mask = np.ones((n_seg),dtype=bool)
#-- size of the window used in the fit
Distributed_Window = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_Window.mask = np.ones((n_seg),dtype=bool)
#-- robust dispersion estimator
Distributed_RDE = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_RDE.mask = np.ones((n_seg),dtype=bool)
#-- signal-to-noise ratio
Distributed_SNR = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_SNR.mask = np.ones((n_seg),dtype=bool)
#-- maximum signal-to-noise ratio from photon classifier
Distributed_Photon_SNR = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Distributed_Photon_SNR.mask = np.ones((n_seg),dtype=bool)
#-- segment quality summary
Distributed_Summary = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_Summary.mask = np.ones((n_seg),dtype=bool)
#-- number of iterations for fit
Distributed_Iterations = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_Iterations.mask = np.ones((n_seg),dtype=bool)
#-- number of estimated clusters of data
Distributed_Clusters = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Distributed_Clusters.mask = np.ones((n_seg),dtype=bool)
#-- signal source selection
Distributed_Source = np.ma.zeros((n_seg),fill_value=4,dtype=int)
Distributed_Source.mask = np.ones((n_seg),dtype=bool)
#-- number of pulses in segment
Distributed_Pulses = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_Pulses.mask = np.ones((n_seg),dtype=bool)
#-- first photon bias estimates
Distributed_FPB_mean_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_mean_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_mean_sigma = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_mean_sigma.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_median_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_median_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_median_sigma = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_median_sigma.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_n_corr = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Distributed_FPB_n_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_FPB_cal_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_FPB_cal_corr.mask = np.ones((n_seg),dtype=bool)
#-- transmit pulse shape bias estimates
Distributed_TPS_mean_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_TPS_mean_corr.mask = np.ones((n_seg),dtype=bool)
Distributed_TPS_median_corr = np.ma.zeros((n_seg),fill_value=fill_value)
Distributed_TPS_median_corr.mask = np.ones((n_seg),dtype=bool)
    #-- Extract photon-event coordinates and heights for this beam (gtx) and
    #-- convert the per-segment along-track distances to a continuous
    #-- along-track coordinate by adding each segment's start distance.
    #-- along-track and across-track distance for photon events
    x_atc = fileID[gtx]['heights']['dist_ph_along'][:].copy()
    y_atc = fileID[gtx]['heights']['dist_ph_across'][:].copy()
    #-- photon event heights
    h_ph = fileID[gtx]['heights']['h_ph'][:].copy()
    #-- for each 20m segment
    for j,_ in enumerate(Segment_ID[gtx]):
        #-- index for 20m segment j
        idx = Segment_Index_begin[gtx][j]
        #-- skip segments with no photon events
        #-- (invalid segments are flagged with a negative start index)
        if (idx < 0):
            continue
        #-- number of photons in 20m segment
        cnt = Segment_PE_count[gtx][j]
        #-- add segment distance to along-track coordinates (in place)
        x_atc[idx:idx+cnt] += Segment_Distance[gtx][j]
    #-- iterate over ATLAS major frames
    #-- map photon events and background records to major frame numbers
    photon_mframes = fileID[gtx]['heights']['pce_mframe_cnt'][:].copy()
    pce_mframe_cnt = fileID[gtx]['bckgrd_atlas']['pce_mframe_cnt'][:].copy()
    #-- unique_index gives the first bckgrd_atlas record of each major frame
    unique_major_frames,unique_index = np.unique(pce_mframe_cnt,return_index=True)
    major_frame_count = len(unique_major_frames)
    #-- telemetry band heights used to size the classification window
    tlm_height_band1 = fileID[gtx]['bckgrd_atlas']['tlm_height_band1'][:].copy()
    tlm_height_band2 = fileID[gtx]['bckgrd_atlas']['tlm_height_band2'][:].copy()
    #-- Compute photon-event classification weights per ATLAS major frame.
    #-- Each MPI rank processes a strided subset of major frames; the per-rank
    #-- weight arrays are then summed across ranks (non-overlapping indices,
    #-- so the SUM reconstructs the full weight vector).
    #-- photon event weights
    Distributed_Weights = np.zeros((n_pe),dtype=np.float64)
    #-- run for each major frame (distributed over comm.size # of processes)
    for iteration in range(comm.rank, major_frame_count, comm.size):
        #-- background atlas index for iteration
        idx = unique_index[iteration]
        #-- sum of 2 telemetry band widths for major frame
        h_win_width = tlm_height_band1[idx] + tlm_height_band2[idx]
        #-- photon indices for major frame (buffered by 1 on each side)
        #-- the buffer gives the classifier neighboring-frame context
        i1, = np.nonzero((photon_mframes >= unique_major_frames[iteration]-1) &
            (photon_mframes <= unique_major_frames[iteration]+1))
        #-- indices for the major frame within the buffered window
        i2, = np.nonzero(photon_mframes[i1] == unique_major_frames[iteration])
        #-- calculate photon event weights (only the unbuffered frame i2
        #-- is written, avoiding double-writes between adjacent iterations)
        Distributed_Weights[i1[i2]] = classify_photons(x_atc[i1], h_ph[i1],
            h_win_width, i2, K=5, MIN_PH=5, MIN_XSPREAD=1.0,
            MIN_HSPREAD=0.01, METHOD='linear')
    #-- photon event weights (combined across all ranks)
    pe_weights = np.zeros((n_pe),dtype=np.float64)
    comm.Allreduce(sendbuf=[Distributed_Weights, MPI.DOUBLE], \
        recvbuf=[pe_weights, MPI.DOUBLE], op=MPI.SUM)
    Distributed_Weights = None
    #-- wait for all distributed processes to finish for beam
    comm.Barrier()
    #-- Main per-segment surface-fit loop, distributed over MPI ranks.
    #-- For each reference segment: a primary linear fit over 2 ATL03 segments;
    #-- if that fails, a backup quadratic fit over 4 ATL03 segments; then
    #-- first-photon-bias (FPB) and transmit-pulse-shape (TPS) corrections.
    #-- iterate over valid ATL03 segments
    #-- in ATL03 1-based indexing: invalid == 0
    #-- here in 0-based indexing: invalid == -1
    segment_indices, = np.nonzero((Segment_Index_begin[gtx][:-1] >= 0) &
        (Segment_Index_begin[gtx][1:] >= 0))
    iteration_count = len(segment_indices)
    #-- run for each geoseg (distributed over comm.size # of processes)
    for iteration in range(comm.rank, iteration_count, comm.size):
        #-- index for iteration (can run through a subset of segments)
        j = segment_indices[iteration]
        #-- iterate over valid ATL03 segments
        #-- in ATL03 1-based indexing: invalid == 0
        #-- here in 0-based indexing: invalid == -1
        if (Segment_Index_begin[gtx][j] >= 0):
            #-- index for segment j
            idx = Segment_Index_begin[gtx][j]
            #-- number of photons in segment (use 2 ATL03 segments)
            c1 = int(Segment_PE_count[gtx][j])
            c2 = int(Segment_PE_count[gtx][j+1])
            cnt = c1 + c2
            #-- time of each Photon event (PE)
            segment_times = np.copy(fileID[gtx]['heights']['delta_time'][idx:idx+cnt])
            #-- Photon event lat/lon and elevation (re-tided WGS84)
            segment_heights = np.copy(h_ph[idx:idx+cnt])
            #-- ATL03 pe heights no longer apply the ocean tide
            #-- and so "re-tiding" is no longer necessary
            # segment_heights[:c1] += tide_ocean[j]
            # segment_heights[c1:] += tide_ocean[j+1]
            segment_lats = np.copy(fileID[gtx]['heights']['lat_ph'][idx:idx+cnt])
            segment_lons = np.copy(fileID[gtx]['heights']['lon_ph'][idx:idx+cnt])
            #-- Photon event channel and identification
            ID_channel = np.copy(fileID[gtx]['heights']['ph_id_channel'][idx:idx+cnt])
            ID_pulse = np.copy(fileID[gtx]['heights']['ph_id_pulse'][idx:idx+cnt])
            n_pulses = np.unique(ID_pulse).__len__()
            frame_number = np.copy(fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx+cnt])
            #-- vertical noise-photon density
            background_density = 2.0*n_pulses*Segment_Background[gtx][j]/c
            #-- along-track X and Y coordinates
            distance_along_X = np.copy(x_atc[idx:idx+cnt])
            distance_along_Y = np.copy(y_atc[idx:idx+cnt])
            #-- check the spread of photons along-track (must be > 20m)
            along_X_spread = distance_along_X.max() - distance_along_X.min()
            #-- check confidence level associated with each photon event
            #-- -2: TEP
            #-- -1: Events not associated with a specific surface type
            #-- 0: noise
            #-- 1: buffer but algorithm classifies as background
            #-- 2: low
            #-- 3: medium
            #-- 4: high
            #-- Surface types for signal classification confidence
            #-- 0=Land; 1=Ocean; 2=SeaIce; 3=LandIce; 4=InlandWater
            ice_sig_conf = np.copy(fileID[gtx]['heights']['signal_conf_ph'][idx:idx+cnt,3])
            ice_sig_low_count = np.count_nonzero(ice_sig_conf > 1)
            #-- indices of TEP classified photons
            ice_sig_tep_pe, = np.nonzero(ice_sig_conf == -2)
            #-- photon event weights from photon classifier
            segment_weights = pe_weights[idx:idx+cnt]
            snr_norm = np.max(segment_weights)
            #-- photon event signal-to-noise ratio from photon classifier
            #-- NOTE(review): if snr_norm == 0 this divides by zero
            #-- (cf. the guarded form in the backup path below) -- confirm
            photon_snr = np.array(100.0*segment_weights/snr_norm,dtype=int)
            Distributed_Photon_SNR.data[j] = np.copy(snr_norm)
            #-- NOTE(review): mask=True marks invalid entries in numpy.ma;
            #-- (snr_norm > 0) masks segments WITH signal -- looks inverted
            #-- relative to the mask[j] = False pattern used elsewhere; confirm
            Distributed_Photon_SNR.mask[j] = (snr_norm > 0)
            #-- photon confidence levels from classifier
            pe_sig_conf = np.zeros((cnt),dtype=int)
            #-- calculate confidence levels from photon classifier
            pe_sig_conf[photon_snr >= 25] = 2
            pe_sig_conf[photon_snr >= 60] = 3
            pe_sig_conf[photon_snr >= 80] = 4
            #-- copy classification for TEP photons
            pe_sig_conf[ice_sig_tep_pe] = -2
            pe_sig_low_count = np.count_nonzero(pe_sig_conf > 1)
            #-- check if segment has photon events classified for land ice
            #-- that are at or above low-confidence threshold
            #-- and that the spread of photons is greater than 20m
            if (pe_sig_low_count > 10) & (along_X_spread > 20):
                #-- use density-based spatial clustering in segment
                db = sklearn.cluster.DBSCAN(eps=0.5).fit(
                    np.c_[distance_along_X, segment_heights],
                    sample_weight=photon_snr)
                labels = db.labels_
                #-- number of noise photons (DBSCAN labels noise as -1)
                noise_photons = list(labels).count(-1)
                noise_cluster = 1 if noise_photons else 0
                #-- number of photon event clusters in segment
                n_clusters = len(set(labels)) - noise_cluster
                Distributed_Clusters.data[j] = n_clusters
                #-- NOTE(review): (n_clusters > 0) masks segments WITH
                #-- clusters -- same suspected polarity inversion as the
                #-- Photon_SNR mask above; confirm intended
                Distributed_Clusters.mask[j] = (n_clusters > 0)
                #-- perform a surface fit procedure
                Segment_X = Segment_Distance[gtx][j] + Segment_Length[gtx][j]
                valid,fit,centroid = icesat2_toolkit.fit.try_surface_fit(
                    distance_along_X, distance_along_Y, segment_heights,
                    pe_sig_conf, Segment_X, SURF_TYPE='linear', ITERATE=20,
                    CONFIDENCE=[1,0])
                #-- indices of points used in final iterated fit
                ifit = fit['indices'] if valid else None
                #-- accept the fit if valid and height error is under 20m
                if bool(valid) & (np.abs(fit['error'][0]) < 20):
                    Distributed_Height.data[j] = fit['beta'][0]
                    Distributed_Height.mask[j] = False
                    Distributed_dH_along.data[j] = fit['beta'][1]
                    Distributed_dH_along.mask[j] = False
                    Distributed_Height_Error.data[j] = fit['error'][0]
                    Distributed_Height_Error.mask[j] = False
                    Distributed_dH_along_Error.data[j] = fit['error'][1]
                    Distributed_dH_along_Error.mask[j] = False
                    #-- along-track and cross-track coordinates
                    Distributed_X_atc.data[j] = np.copy(centroid['x'])
                    Distributed_X_atc.mask[j] = False
                    Distributed_X_spread.data[j] = np.copy(along_X_spread)
                    Distributed_X_spread.mask[j] = False
                    Distributed_Y_atc.data[j] = np.copy(centroid['y'])
                    Distributed_Y_atc.mask[j] = False
                    #-- fit geolocation to the along-track distance of segment
                    Distributed_delta_time.data[j] = \
                        icesat2_toolkit.fit.fit_geolocation(segment_times[ifit],
                        distance_along_X[ifit], Distributed_X_atc[j])
                    Distributed_delta_time.mask[j] = False
                    Distributed_Longitude.data[j] = \
                        icesat2_toolkit.fit.fit_geolocation(segment_lons[ifit],
                        distance_along_X[ifit], Distributed_X_atc[j])
                    Distributed_Longitude.mask[j] = False
                    Distributed_Latitude.data[j] = \
                        icesat2_toolkit.fit.fit_geolocation(segment_lats[ifit],
                        distance_along_X[ifit], Distributed_X_atc[j])
                    Distributed_Latitude.mask[j] = False
                    #-- number of photons used in fit
                    Distributed_N_Fit.data[j] = len(ifit)
                    Distributed_N_Fit.mask[j] = False
                    #-- size of the final window
                    Distributed_Window.data[j] = np.copy(fit['window'])
                    Distributed_Window.mask[j] = False
                    #-- robust dispersion estimator
                    Distributed_RDE.data[j] = np.copy(fit['RDE'])
                    Distributed_RDE.mask[j] = False
                    #-- signal to noise ratio
                    N_BG = background_density*Distributed_Window.data[j]
                    Distributed_SNR.data[j] = Distributed_N_Fit.data[j]/N_BG
                    Distributed_SNR.mask[j] = False
                    #-- number of iterations used in fit
                    Distributed_Iterations.data[j] = np.copy(fit['iterations'])
                    Distributed_Iterations.mask[j] = False
                    #-- signal source: primary algorithm encodes 'valid' as-is
                    Distributed_Source.data[j] = np.copy(valid)
                    Distributed_Source.mask[j] = False
                    Distributed_Pulses.data[j] = np.copy(n_pulses)
                    Distributed_Pulses.mask[j] = False
                    #-- calculate residuals off of fit surface for all data
                    x_slope = Distributed_dH_along[j]*(distance_along_X-Distributed_X_atc[j])
                    height_residuals = segment_heights-Distributed_Height[j]-x_slope
                    #-- convert height residuals to two-way travel time (c =
                    #-- speed of light; negative since height opposes range)
                    temporal_residuals = -2.0*height_residuals/c
                    #-- calculate difference between the mean and the median from the fit
                    Distributed_Mean_Median.data[j] = np.mean(height_residuals[ifit]) - \
                        np.median(height_residuals[ifit])
                    Distributed_Mean_Median.mask[j] = False
                    #-- calculate flags for quality summary
                    VPD = Distributed_N_Fit.data[j]/Distributed_Window.data[j]
                    Distributed_Summary.data[j] = int(
                        (Distributed_RDE.data[j] >= 1) |
                        (Distributed_Height_Error.data[j] >= 1) |
                        (VPD <= (n_pixels/4.0)))
                    Distributed_Summary.mask[j] = False
                    #-- estimate first photon bias corrections
                    #-- step-size for histograms (50 ps ~ 7.5mm height)
                    ii, = np.nonzero((height_residuals >= -Distributed_Window.data[j]) &
                        (height_residuals <= Distributed_Window.data[j]))
                    try:
                        FPB = icesat2_toolkit.fit.calc_first_photon_bias(
                            temporal_residuals[ii], n_pulses, n_pixels,
                            mean_dead_time[gtx], 5e-11, ITERATE=20)
                    except:
                        #-- best-effort: FPB outputs stay masked on failure
                        pass
                    else:
                        #-- convert temporal corrections to heights (factor c/2)
                        Distributed_FPB_mean_corr.data[j] = -0.5*FPB['mean']*c
                        Distributed_FPB_mean_corr.mask[j] = False
                        Distributed_FPB_mean_sigma.data[j] = 0.5*FPB['mean_sigma']*c
                        Distributed_FPB_mean_sigma.mask[j] = False
                        Distributed_FPB_median_corr.data[j] = -0.5*FPB['median']*c
                        Distributed_FPB_median_corr.mask[j] = False
                        Distributed_FPB_median_sigma.data[j] = 0.5*FPB['median_sigma']*c
                        Distributed_FPB_median_sigma.mask[j] = False
                        Distributed_FPB_n_corr.data[j] = np.copy(FPB['count'])
                        Distributed_FPB_n_corr.mask[j] = False
                        #-- first photon bias correction from CAL-19
                        FPB_calibrated = CAL19.ev(FPB['strength'],FPB['width'])
                        Distributed_FPB_cal_corr.data[j] = -0.5*FPB_calibrated*c
                        Distributed_FPB_cal_corr.mask[j] = False
                    #-- estimate transmit pulse shape correction
                    try:
                        W_RX = 2.0*Distributed_RDE.data[j]/c
                        dt_W = 2.0*Distributed_Window.data[j]/c
                        TPS = icesat2_toolkit.fit.calc_transmit_pulse_shape(t_TX,
                            p_TX, W_TX, W_RX, dt_W, Distributed_SNR.data[j],
                            ITERATE=50)
                    except:
                        #-- best-effort: TPS outputs stay masked on failure
                        pass
                    else:
                        Distributed_TPS_mean_corr.data[j] = 0.5*TPS['mean']*c
                        Distributed_TPS_mean_corr.mask[j] = False
                        Distributed_TPS_median_corr.data[j] = 0.5*TPS['median']*c
                        Distributed_TPS_median_corr.mask[j] = False
        #-- some ATL03 segments will not result in a valid fit
        #-- backup algorithm uses 4 segments to find a valid surface
        if (j not in (0,n_seg-2,n_seg-1)) & Distributed_Height.mask[j] & \
            (Segment_Index_begin[gtx][j-1] > 0):
            #-- index for segment j
            idx = Segment_Index_begin[gtx][j-1]
            #-- number of photons in segment (use 4 ATL03 segments)
            c1 = Segment_PE_count[gtx][j-1].astype(int)
            c2 = Segment_PE_count[gtx][j].astype(int)
            c3 = Segment_PE_count[gtx][j+1].astype(int)
            c4 = Segment_PE_count[gtx][j+2].astype(int)
            cnt = c1 + c2 + c3 + c4
            #-- time of each Photon event (PE)
            segment_times = np.copy(fileID[gtx]['heights']['delta_time'][idx:idx+cnt])
            #-- Photon event lat/lon and elevation (re-tided WGS84)
            segment_heights = np.copy(h_ph[idx:idx+cnt])
            #-- ATL03 pe heights no longer apply the ocean tide
            #-- and so "re-tiding" is no longer necessary
            # segment_heights[:c1] += tide_ocean[j-1]
            # segment_heights[c1:c1+c2] += tide_ocean[j]
            # segment_heights[c1+c2:c1+c2+c3] += tide_ocean[j+1]
            # segment_heights[c1+c2+c3:] += tide_ocean[j+2]
            segment_lats = np.copy(fileID[gtx]['heights']['lat_ph'][idx:idx+cnt])
            segment_lons = np.copy(fileID[gtx]['heights']['lon_ph'][idx:idx+cnt])
            #-- Photon event channel and identification
            ID_channel = np.copy(fileID[gtx]['heights']['ph_id_channel'][idx:idx+cnt])
            ID_pulse = np.copy(fileID[gtx]['heights']['ph_id_pulse'][idx:idx+cnt])
            n_pulses = np.unique(ID_pulse).__len__()
            frame_number = np.copy(fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx+cnt])
            #-- vertical noise-photon density
            background_density = 2.0*n_pulses*Segment_Background[gtx][j]/c
            #-- along-track X and Y coordinates
            distance_along_X = np.copy(x_atc[idx:idx+cnt])
            distance_along_Y = np.copy(y_atc[idx:idx+cnt])
            #-- check the spread of photons along-track (must be > 40m)
            along_X_spread = distance_along_X.max() - distance_along_X.min()
            #-- check confidence level associated with each photon event
            #-- -2: TEP
            #-- -1: Events not associated with a specific surface type
            #-- 0: noise
            #-- 1: buffer but algorithm classifies as background
            #-- 2: low
            #-- 3: medium
            #-- 4: high
            #-- Surface types for signal classification confidence
            #-- 0=Land; 1=Ocean; 2=SeaIce; 3=LandIce; 4=InlandWater
            ice_sig_conf = np.copy(fileID[gtx]['heights']['signal_conf_ph'][idx:idx+cnt,3])
            ice_sig_low_count = np.count_nonzero(ice_sig_conf > 1)
            #-- indices of TEP classified photons
            ice_sig_tep_pe, = np.nonzero(ice_sig_conf == -2)
            #-- photon event weights from photon classifier
            segment_weights = pe_weights[idx:idx+cnt]
            snr_norm = np.max(segment_weights)
            #-- photon event signal-to-noise ratio from photon classifier
            #-- (here guarded against snr_norm == 0, unlike the primary path)
            photon_snr = np.zeros((cnt),dtype=int)
            if (snr_norm > 0):
                photon_snr[:] = 100.0*segment_weights/snr_norm
            #-- copy signal to noise ratio for segment
            Distributed_Photon_SNR.data[j] = np.copy(snr_norm)
            #-- NOTE(review): same suspected mask polarity inversion as the
            #-- primary path -- (snr_norm > 0) masks valid entries; confirm
            Distributed_Photon_SNR.mask[j] = (snr_norm > 0)
            #-- photon confidence levels from classifier
            pe_sig_conf = np.zeros((cnt),dtype=int)
            #-- calculate confidence levels from photon classifier
            pe_sig_conf[photon_snr >= 25] = 2
            pe_sig_conf[photon_snr >= 60] = 3
            pe_sig_conf[photon_snr >= 80] = 4
            #-- copy classification for TEP photons
            pe_sig_conf[ice_sig_tep_pe] = -2
            pe_sig_low_count = np.count_nonzero(pe_sig_conf > 1)
            #-- check if segment has photon events classified for land ice
            #-- that are at or above low-confidence threshold
            #-- and that the spread of photons is greater than 40m
            if (pe_sig_low_count > 10) & (along_X_spread > 40):
                #-- use density-based spatial clustering in segment
                db = sklearn.cluster.DBSCAN(eps=0.5).fit(
                    np.c_[distance_along_X, segment_heights],
                    sample_weight=photon_snr)
                labels = db.labels_
                #-- number of noise photons (DBSCAN labels noise as -1)
                noise_photons = list(labels).count(-1)
                noise_cluster = 1 if noise_photons else 0
                #-- number of photon event clusters in segment
                n_clusters = len(set(labels)) - noise_cluster
                Distributed_Clusters.data[j] = n_clusters
                #-- NOTE(review): same suspected mask polarity inversion as
                #-- the primary path; confirm intended
                Distributed_Clusters.mask[j] = (n_clusters > 0)
                #-- perform a surface fit procedure
                Segment_X = Segment_Distance[gtx][j] + Segment_Length[gtx][j]
                valid,fit,centroid = icesat2_toolkit.fit.try_surface_fit(
                    distance_along_X, distance_along_Y, segment_heights,
                    pe_sig_conf, Segment_X, SURF_TYPE='quadratic',
                    ITERATE=20, CONFIDENCE=[0])
                #-- indices of points used in final iterated fit
                ifit = fit['indices'] if valid else None
                #-- accept the fit if valid and height error is under 20m
                if bool(valid) & (np.abs(fit['error'][0]) < 20):
                    Distributed_Height.data[j] = fit['beta'][0]
                    Distributed_Height.mask[j] = False
                    Distributed_dH_along.data[j] = fit['beta'][1]
                    Distributed_dH_along.mask[j] = False
                    Distributed_Height_Error.data[j] = fit['error'][0]
                    Distributed_Height_Error.mask[j] = False
                    Distributed_dH_along_Error.data[j] = fit['error'][1]
                    Distributed_dH_along_Error.mask[j] = False
                    #-- along-track and cross-track coordinates
                    Distributed_X_atc.data[j] = np.copy(centroid['x'])
                    Distributed_X_atc.mask[j] = False
                    Distributed_X_spread.data[j] = np.copy(along_X_spread)
                    Distributed_X_spread.mask[j] = False
                    Distributed_Y_atc.data[j] = np.copy(centroid['y'])
                    Distributed_Y_atc.mask[j] = False
                    #-- fit geolocation to the along-track distance of segment
                    #-- NOTE(review): unlike the primary fit path, neither
                    #-- Distributed_delta_time.mask[j] nor
                    #-- Distributed_Latitude.mask[j] is cleared to False here,
                    #-- so those outputs stay masked for backup-fit segments --
                    #-- likely a copy-paste omission; confirm before fixing
                    Distributed_delta_time.data[j] = \
                        icesat2_toolkit.fit.fit_geolocation(segment_times[ifit],
                        distance_along_X[ifit], Distributed_X_atc[j])
                    Distributed_Longitude.data[j] = \
                        icesat2_toolkit.fit.fit_geolocation(segment_lons[ifit],
                        distance_along_X[ifit], Distributed_X_atc[j])
                    Distributed_Longitude.mask[j] = False
                    Distributed_Latitude.data[j] = \
                        icesat2_toolkit.fit.fit_geolocation(segment_lats[ifit],
                        distance_along_X[ifit], Distributed_X_atc[j])
                    #-- number of photons used in fit
                    Distributed_N_Fit.data[j] = len(ifit)
                    Distributed_N_Fit.mask[j] = False
                    #-- size of the final window
                    Distributed_Window.data[j] = np.copy(fit['window'])
                    Distributed_Window.mask[j] = False
                    #-- robust dispersion estimator
                    Distributed_RDE.data[j] = np.copy(fit['RDE'])
                    Distributed_RDE.mask[j] = False
                    #-- signal to noise ratio
                    N_BG = background_density*Distributed_Window.data[j]
                    Distributed_SNR.data[j] = Distributed_N_Fit.data[j]/N_BG
                    Distributed_SNR.mask[j] = False
                    #-- number of iterations used in fit
                    Distributed_Iterations.data[j] = np.copy(fit['iterations'])
                    Distributed_Iterations.mask[j] = False
                    #-- signal source: backup algorithm offsets 'valid' by 2
                    #-- to distinguish it from the primary algorithm
                    Distributed_Source.data[j] = 2 + np.copy(valid)
                    Distributed_Source.mask[j] = False
                    Distributed_Pulses.data[j] = np.copy(n_pulses)
                    Distributed_Pulses.mask[j] = False
                    #-- calculate residuals off of fit surface for all data
                    x_slope = Distributed_dH_along[j]*(distance_along_X-Distributed_X_atc[j])
                    height_residuals = segment_heights-Distributed_Height[j]-x_slope
                    #-- convert height residuals to two-way travel time
                    temporal_residuals = -2.0*height_residuals/c
                    #-- calculate difference between the mean and the median from the fit
                    Distributed_Mean_Median.data[j] = np.mean(height_residuals[ifit]) - \
                        np.median(height_residuals[ifit])
                    Distributed_Mean_Median.mask[j] = False
                    #-- calculate flags for quality summary
                    VPD = Distributed_N_Fit.data[j]/Distributed_Window.data[j]
                    Distributed_Summary.data[j] = int(
                        (Distributed_RDE.data[j] >= 1) |
                        (Distributed_Height_Error.data[j] >= 1) |
                        (VPD <= (n_pixels/4.0)))
                    Distributed_Summary.mask[j] = False
                    #-- estimate first photon bias corrections
                    #-- step-size for histograms (50 ps ~ 7.5mm height)
                    try:
                        ii, = np.nonzero((height_residuals >= -Distributed_Window.data[j]) &
                            (height_residuals <= Distributed_Window.data[j]))
                        FPB = icesat2_toolkit.fit.calc_first_photon_bias(
                            temporal_residuals[ii], n_pulses, n_pixels,
                            mean_dead_time[gtx], 5e-11, ITERATE=20)
                    except:
                        #-- best-effort: FPB outputs stay masked on failure
                        pass
                    else:
                        #-- convert temporal corrections to heights (factor c/2)
                        Distributed_FPB_mean_corr.data[j] = -0.5*FPB['mean']*c
                        Distributed_FPB_mean_corr.mask[j] = False
                        Distributed_FPB_mean_sigma.data[j] = 0.5*FPB['mean_sigma']*c
                        Distributed_FPB_mean_sigma.mask[j] = False
                        Distributed_FPB_median_corr.data[j] = -0.5*FPB['median']*c
                        Distributed_FPB_median_corr.mask[j] = False
                        Distributed_FPB_median_sigma.data[j] = 0.5*FPB['median_sigma']*c
                        Distributed_FPB_median_sigma.mask[j] = False
                        Distributed_FPB_n_corr.data[j] = np.copy(FPB['count'])
                        Distributed_FPB_n_corr.mask[j] = False
                        #-- first photon bias correction from CAL-19
                        FPB_calibrated = CAL19.ev(FPB['strength'],FPB['width'])
                        Distributed_FPB_cal_corr.data[j] = -0.5*FPB_calibrated*c
                        Distributed_FPB_cal_corr.mask[j] = False
                    #-- estimate transmit pulse shape correction
                    try:
                        W_RX = 2.0*Distributed_RDE.data[j]/c
                        dt_W = 2.0*Distributed_Window.data[j]/c
                        TPS = icesat2_toolkit.fit.calc_transmit_pulse_shape(t_TX,
                            p_TX, W_TX, W_RX, dt_W, Distributed_SNR.data[j],
                            ITERATE=50)
                    except:
                        #-- best-effort: TPS outputs stay masked on failure
                        pass
                    else:
                        Distributed_TPS_mean_corr.data[j] = 0.5*TPS['mean']*c
                        Distributed_TPS_mean_corr.mask[j] = False
                        Distributed_TPS_median_corr.data[j] = 0.5*TPS['median']*c
                        Distributed_TPS_median_corr.mask[j] = False
        #-- if there is a valid land ice height
        if (~Distributed_Height.mask[j]):
            #-- land ice height corrected for first photon bias and transmit-pulse shape
            #-- segment heights have already been "re-tided"
            Distributed_Land_Ice.data[j] = Distributed_Height.data[j] + \
                Distributed_FPB_median_corr.data[j] + Distributed_TPS_median_corr.data[j]
            Distributed_Land_Ice.mask[j] = False
            #-- land ice height errors (max of fit or first photon bias uncertainties)
            Distributed_Land_Ice_Error.data[j] = np.sqrt(np.max([
                Distributed_Height_Error.data[j]**2,
                Distributed_FPB_median_sigma.data[j]**2]))
            Distributed_Land_Ice_Error.mask[j] = False
#-- communicate output MPI matrices between ranks
#-- operations are element summations and logical "and" across elements
#-- delta time of fit photons
Segment_delta_time[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_delta_time[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_delta_time.data, MPI.DOUBLE], \
recvbuf=[Segment_delta_time[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_delta_time.mask, MPI.BOOL], \
recvbuf=[Segment_delta_time[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_delta_time = None
#-- segment fit heights
Segment_Height[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Height[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Height.data, MPI.DOUBLE], \
recvbuf=[Segment_Height[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Height.mask, MPI.BOOL], \
recvbuf=[Segment_Height[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Height = None
#-- land ice height corrected for first photon bias and transmit-pulse shape
Segment_Land_Ice[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Land_Ice[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Land_Ice.data, MPI.DOUBLE], \
recvbuf=[Segment_Land_Ice[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Land_Ice.mask, MPI.BOOL], \
recvbuf=[Segment_Land_Ice[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Land_Ice = None
#-- segment fit along-track slopes
Segment_dH_along[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_dH_along[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_dH_along.data, MPI.DOUBLE], \
recvbuf=[Segment_dH_along[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_dH_along.mask, MPI.BOOL], \
recvbuf=[Segment_dH_along[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_dH_along = None
#-- segment fit height errors
Segment_Height_Error[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Height_Error[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Height_Error.data, MPI.DOUBLE], \
recvbuf=[Segment_Height_Error[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Height_Error.mask, MPI.BOOL], \
recvbuf=[Segment_Height_Error[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Height_Error = None
#-- land ice height errors (max of fit or first photon bias uncertainties)
Segment_Land_Ice_Error[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Land_Ice_Error[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Land_Ice_Error.data, MPI.DOUBLE], \
recvbuf=[Segment_Land_Ice_Error[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Land_Ice_Error.mask, MPI.BOOL], \
recvbuf=[Segment_Land_Ice_Error[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Land_Ice_Error = None
#-- segment fit along-track slope errors
Segment_dH_along_Error[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_dH_along_Error[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_dH_along_Error.data, MPI.DOUBLE], \
recvbuf=[Segment_dH_along_Error[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_dH_along_Error.mask, MPI.BOOL], \
recvbuf=[Segment_dH_along_Error[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_dH_along_Error = None
#-- difference between the mean and median of the residuals from fit height
Segment_Mean_Median[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Mean_Median[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Mean_Median.data, MPI.DOUBLE], \
recvbuf=[Segment_Mean_Median[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Mean_Median.mask, MPI.BOOL], \
recvbuf=[Segment_Mean_Median[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Mean_Median = None
#-- along-track X coordinates of segment fit
Segment_X_atc[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_X_atc[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_X_atc.data, MPI.DOUBLE], \
recvbuf=[Segment_X_atc[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_X_atc.mask, MPI.BOOL], \
recvbuf=[Segment_X_atc[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_X_atc = None
#-- along-track X coordinate spread of points used in segment fit
Segment_X_spread[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_X_spread[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_X_spread.data, MPI.DOUBLE], \
recvbuf=[Segment_X_spread[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_X_spread.mask, MPI.BOOL], \
recvbuf=[Segment_X_spread[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_X_spread = None
#-- along-track Y coordinates of segment fit
Segment_Y_atc[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Y_atc[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Y_atc.data, MPI.DOUBLE], \
recvbuf=[Segment_Y_atc[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Y_atc.mask, MPI.BOOL], \
recvbuf=[Segment_Y_atc[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Y_atc = None
#-- longitude of fit photons
Segment_Longitude[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Longitude[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Longitude.data, MPI.DOUBLE], \
recvbuf=[Segment_Longitude[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Longitude.mask, MPI.BOOL], \
recvbuf=[Segment_Longitude[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Longitude = None
#-- latitude of fit photons
Segment_Latitude[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Latitude[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Latitude.data, MPI.DOUBLE], \
recvbuf=[Segment_Latitude[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Latitude.mask, MPI.BOOL], \
recvbuf=[Segment_Latitude[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Latitude = None
#-- number of photons in fit
Segment_N_Fit[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_N_Fit[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_N_Fit.data, MPI.INT], \
recvbuf=[Segment_N_Fit[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_N_Fit.mask, MPI.BOOL], \
recvbuf=[Segment_N_Fit[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_N_Fit = None
#-- size of the window used in the fit
Segment_Window[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_Window[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Window.data, MPI.DOUBLE], \
recvbuf=[Segment_Window[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Window.mask, MPI.BOOL], \
recvbuf=[Segment_Window[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Window = None
#-- robust dispersion estimator
Segment_RDE[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_RDE[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_RDE.data, MPI.DOUBLE], \
recvbuf=[Segment_RDE[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_RDE.mask, MPI.BOOL], \
recvbuf=[Segment_RDE[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_RDE = None
#-- signal-to-noise ratio
Segment_SNR[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
Segment_SNR[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_SNR.data, MPI.DOUBLE], \
recvbuf=[Segment_SNR[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_SNR.mask, MPI.BOOL], \
recvbuf=[Segment_SNR[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_SNR = None
#-- photon event signal-to-noise ratio from photon classifier
Segment_Photon_SNR[gtx] = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Segment_Photon_SNR[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Photon_SNR.data, MPI.INT], \
recvbuf=[Segment_Photon_SNR[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Photon_SNR.mask, MPI.BOOL], \
recvbuf=[Segment_Photon_SNR[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Photon_SNR = None
#-- segment quality summary
Segment_Summary[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_Summary[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Summary.data, MPI.INT], \
recvbuf=[Segment_Summary[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Summary.mask, MPI.BOOL], \
recvbuf=[Segment_Summary[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Summary = None
#-- number of iterations for fit
Segment_Iterations[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_Iterations[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Iterations.data, MPI.INT], \
recvbuf=[Segment_Iterations[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Iterations.mask, MPI.BOOL], \
recvbuf=[Segment_Iterations[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Iterations = None
#-- number of photon event clusters
Segment_Clusters[gtx] = np.ma.zeros((n_seg),fill_value=0,dtype=int)
Segment_Clusters[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Clusters.data, MPI.INT], \
recvbuf=[Segment_Clusters[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Clusters.mask, MPI.BOOL], \
recvbuf=[Segment_Clusters[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Clusters = None
#-- signal source selection
Segment_Source[gtx] = np.ma.zeros((n_seg),fill_value=4,dtype=int)
Segment_Source[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Source.data, MPI.INT], \
recvbuf=[Segment_Source[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Source.mask, MPI.BOOL], \
recvbuf=[Segment_Source[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Source = None
#-- number of pulses in segment
Segment_Pulses[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
Segment_Pulses[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_Pulses.data, MPI.INT], \
recvbuf=[Segment_Pulses[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_Pulses.mask, MPI.BOOL], \
recvbuf=[Segment_Pulses[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_Pulses = None
#-- first photon bias estimates
FPB_mean_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_mean_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_corr.data, MPI.DOUBLE], \
recvbuf=[FPB_mean_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_corr.mask, MPI.BOOL], \
recvbuf=[FPB_mean_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_mean_corr = None
FPB_mean_sigma[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_mean_sigma[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_sigma.data, MPI.DOUBLE], \
recvbuf=[FPB_mean_sigma[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_mean_sigma.mask, MPI.BOOL], \
recvbuf=[FPB_mean_sigma[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_mean_sigma = None
FPB_median_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_median_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_median_corr.data, MPI.DOUBLE], \
recvbuf=[FPB_median_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_median_corr.mask, MPI.BOOL], \
recvbuf=[FPB_median_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_median_corr = None
FPB_median_sigma[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_median_sigma[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_median_sigma.data, MPI.DOUBLE], \
recvbuf=[FPB_median_sigma[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_median_sigma.mask, MPI.BOOL], \
recvbuf=[FPB_median_sigma[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_median_sigma = None
FPB_n_corr[gtx] = np.ma.zeros((n_seg),fill_value=-1,dtype=int)
FPB_n_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_n_corr.data, MPI.INT], \
recvbuf=[FPB_n_corr[gtx].data, MPI.INT], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_n_corr.mask, MPI.BOOL], \
recvbuf=[FPB_n_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_n_corr = None
FPB_cal_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
FPB_cal_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_FPB_cal_corr.data, MPI.DOUBLE], \
recvbuf=[FPB_cal_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_FPB_cal_corr.mask, MPI.BOOL], \
recvbuf=[FPB_cal_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_FPB_cal_corr = None
#-- transmit pulse shape bias estimates
TPS_mean_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
TPS_mean_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_TPS_mean_corr.data, MPI.DOUBLE], \
recvbuf=[TPS_mean_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_TPS_mean_corr.mask, MPI.BOOL], \
recvbuf=[TPS_mean_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_TPS_mean_corr = None
TPS_median_corr[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
TPS_median_corr[gtx].mask = np.ones((n_seg),dtype=bool)
comm.Allreduce(sendbuf=[Distributed_TPS_median_corr.data, MPI.DOUBLE], \
recvbuf=[TPS_median_corr[gtx].data, MPI.DOUBLE], op=MPI.SUM)
comm.Allreduce(sendbuf=[Distributed_TPS_median_corr.mask, MPI.BOOL], \
recvbuf=[TPS_median_corr[gtx].mask, MPI.BOOL], op=MPI.LAND)
Distributed_TPS_median_corr = None
#-- wait for all distributed processes to finish for beam
comm.Barrier()
    #-- copy variables for outputting to HDF5 file
    #-- IS2_atl03_fit: output data values; IS2_atl03_fill: per-variable fill
    #-- values; IS2_atl03_attrs: per-variable/group HDF5 attributes
    IS2_atl03_fit = {}
    IS2_atl03_fill = {}
    IS2_atl03_attrs = {}
    #-- ICESat-2 spacecraft orientation at time
    IS2_atl03_fit['orbit_info'] = {}
    IS2_atl03_attrs['orbit_info'] = {}
    for key,val in fileID['orbit_info'].items():
        IS2_atl03_fit['orbit_info'][key] = val[:]
        #-- Getting attributes of group and included variables
        #-- Global Group Attributes
        #-- NOTE(review): the group-level attributes are re-read on every
        #-- iteration of the key loop; harmless but redundant
        for att_name,att_val in fileID['orbit_info'].attrs.items():
            IS2_atl03_attrs['orbit_info'][att_name] = att_val
        #-- Variable Attributes
        IS2_atl03_attrs['orbit_info'][key] = {}
        for att_name,att_val in val.attrs.items():
            IS2_atl03_attrs['orbit_info'][key][att_name] = att_val
    #-- information ancillary to the data product
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    #-- Add this value to delta time parameters to compute full gps_seconds
    #-- could alternatively use the Julian day of the ATLAS SDP epoch: 2458119.5
    #-- and add leap seconds since 2018-01-01T00:00:00Z UTC (ATLAS SDP epoch)
    IS2_atl03_fit['ancillary_data'] = {}
    IS2_atl03_attrs['ancillary_data'] = {}
    #-- copy the named scalar ancillary variables (and their attributes)
    #-- directly from the input ATL03 file
    for key in ['atlas_sdp_gps_epoch','data_end_utc','data_start_utc','end_cycle',
        'end_geoseg','end_gpssow','end_gpsweek','end_orbit','end_region',
        'end_rgt','granule_end_utc','granule_start_utc','release','start_cycle',
        'start_geoseg','start_gpssow','start_gpsweek','start_orbit','start_region',
        'start_rgt','version']:
        #-- get each HDF5 variable
        IS2_atl03_fit['ancillary_data'][key] = fileID['ancillary_data'][key][:]
        #-- Getting attributes of group and included variables
        IS2_atl03_attrs['ancillary_data'][key] = {}
        for att_name,att_val in fileID['ancillary_data'][key].attrs.items():
            IS2_atl03_attrs['ancillary_data'][key][att_name] = att_val
    #-- for each output beam
    for gtx in sorted(IS2_atl03_beams):
        #-- atmospheric profile for beam gtx from ATL09 dataset
        pfl = fileID[gtx].attrs['atmosphere_profile']
        #-- complementary beam in pair
        cmp = associated_beam_pair[gtx]
        #-- extract and interpolate atmospheric parameters from ATL09
        dtime = fileID[gtx]['geolocation']['delta_time'][:]
        IS2_atl09_mds,IS2_atl09_attrs = read_HDF5_ATL09(args.ATL09, pfl,
            dtime, ATTRIBUTES=True, VERBOSE=args.verbose, COMM=comm)
        #-- segment fit across-track slopes
        Distributed_dH_across = np.ma.zeros((n_seg),fill_value=fill_value)
        Distributed_dH_across.mask = np.ones((n_seg),dtype=bool)
        #-- segment fit across-track slope errors
        Distributed_dH_across_Error = np.ma.zeros((n_seg),fill_value=fill_value)
        Distributed_dH_across_Error.mask = np.ones((n_seg),dtype=bool)
        #-- contribution of geolocation uncertainty to height error
        Distributed_sigma_geo = np.ma.zeros((n_seg),fill_value=fill_value)
        Distributed_sigma_geo.mask = np.ones((n_seg),dtype=bool)
        #-- iterate over valid ATL03 segments
        #-- in ATL03 1-based indexing: invalid == 0
        #-- here in 0-based indexing: invalid == -1
        segment_indices, = np.nonzero((Segment_Index_begin[gtx][:-1] >= 0) &
            (Segment_Index_begin[gtx][1:] >= 0))
        #-- verify that complementary beam pair is in list of beams
        #-- (across-track slopes need heights from both beams of the pair)
        iteration_count = len(segment_indices) if (cmp in IS2_atl03_beams) else 0
        #-- run for each geoseg (distributed over comm.size # of processes)
        for iteration in range(comm.rank, iteration_count, comm.size):
            #-- indice for iteration (can run through a subset of segments)
            j = segment_indices[iteration]
            #-- across track slopes for beam
            #-- require a valid height fit in both beams of the pair
            if ((~Segment_Height[gtx].mask[j]) & (~Segment_Height[cmp].mask[j])):
                #-- segment fit across-track slopes
                dY = (Segment_Y_atc[gtx].data[j] - Segment_Y_atc[cmp].data[j])
                Distributed_dH_across.data[j] = (Segment_Land_Ice[gtx].data[j] -
                    Segment_Land_Ice[cmp].data[j])/dY
                Distributed_dH_across.mask[j] = False
                #-- segment fit across-track slope errors
                #-- propagated from the two beam height errors
                Distributed_dH_across_Error.data[j] = np.sqrt(
                    Segment_Land_Ice_Error[gtx].data[j]**2 +
                    Segment_Land_Ice_Error[cmp].data[j]**2)/np.abs(dY)
                Distributed_dH_across_Error.mask[j] = False
                #-- geolocation uncertainty
                sigma_geo_across = fileID[gtx]['geolocation']['sigma_across'][j]
                sigma_geo_along = fileID[gtx]['geolocation']['sigma_along'][j]
                sigma_geo_h = fileID[gtx]['geolocation']['sigma_h'][j]
                #-- contribution of geolocation uncertainty to height errors
                #-- horizontal errors projected through the along/across slopes
                Distributed_sigma_geo.data[j] = np.sqrt(sigma_geo_h**2 +
                    (sigma_geo_along*Segment_dH_along[gtx].data[j])**2 +
                    (sigma_geo_across*Distributed_dH_across.data[j])**2)
                Distributed_sigma_geo.mask[j] = False
        #-- reduce the rank-local results into complete per-beam arrays
        #-- (sum of data buffers, logical AND of masks)
        #-- segment fit across-track slopes
        Segment_dH_across[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
        Segment_dH_across[gtx].mask = np.ones((n_seg),dtype=bool)
        comm.Allreduce(sendbuf=[Distributed_dH_across.data, MPI.DOUBLE], \
            recvbuf=[Segment_dH_across[gtx].data, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce(sendbuf=[Distributed_dH_across.mask, MPI.BOOL], \
            recvbuf=[Segment_dH_across[gtx].mask, MPI.BOOL], op=MPI.LAND)
        Distributed_dH_across = None
        #-- segment fit across-track slope errors
        Segment_dH_across_Error[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
        Segment_dH_across_Error[gtx].mask = np.ones((n_seg),dtype=bool)
        comm.Allreduce(sendbuf=[Distributed_dH_across_Error.data, MPI.DOUBLE], \
            recvbuf=[Segment_dH_across_Error[gtx].data, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce(sendbuf=[Distributed_dH_across_Error.mask, MPI.BOOL], \
            recvbuf=[Segment_dH_across_Error[gtx].mask, MPI.BOOL], op=MPI.LAND)
        Distributed_dH_across_Error = None
        #-- contribution of geolocation uncertainty to height errors
        Segment_sigma_geo[gtx] = np.ma.zeros((n_seg),fill_value=fill_value)
        Segment_sigma_geo[gtx].mask = np.ones((n_seg),dtype=bool)
        comm.Allreduce(sendbuf=[Distributed_sigma_geo.data, MPI.DOUBLE], \
            recvbuf=[Segment_sigma_geo[gtx].data, MPI.DOUBLE], op=MPI.SUM)
        comm.Allreduce(sendbuf=[Distributed_sigma_geo.mask, MPI.BOOL], \
            recvbuf=[Segment_sigma_geo[gtx].mask, MPI.BOOL], op=MPI.LAND)
        Distributed_sigma_geo = None
        #-- wait for all distributed processes to finish for beam
        comm.Barrier()
#-- set values for invalid segments to fill_value of each variable
Segment_delta_time[gtx].data[Segment_delta_time[gtx].mask] = Segment_delta_time[gtx].fill_value
Segment_Height[gtx].data[Segment_Height[gtx].mask] = Segment_Height[gtx].fill_value
Segment_Land_Ice[gtx].data[Segment_Land_Ice[gtx].mask] = Segment_Land_Ice[gtx].fill_value
Segment_dH_along[gtx].data[Segment_dH_along[gtx].mask] = Segment_dH_along[gtx].fill_value
Segment_dH_across[gtx].data[Segment_dH_across[gtx].mask] = Segment_dH_across[gtx].fill_value
Segment_Height_Error[gtx].data[Segment_Height_Error[gtx].mask] = Segment_Height_Error[gtx].fill_value
Segment_Land_Ice_Error[gtx].data[Segment_Land_Ice_Error[gtx].mask] = Segment_Land_Ice_Error[gtx].fill_value
Segment_dH_along_Error[gtx].data[Segment_dH_along_Error[gtx].mask] = Segment_dH_along_Error[gtx].fill_value
Segment_dH_across_Error[gtx].data[Segment_dH_across_Error[gtx].mask] = Segment_dH_across_Error[gtx].fill_value
Segment_Mean_Median[gtx].data[Segment_Mean_Median[gtx].mask] = Segment_Mean_Median[gtx].fill_value
Segment_X_atc[gtx].data[Segment_X_atc[gtx].mask] = Segment_X_atc[gtx].fill_value
Segment_X_spread[gtx].data[Segment_X_spread[gtx].mask] = Segment_X_spread[gtx].fill_value
Segment_Y_atc[gtx].data[Segment_Y_atc[gtx].mask] = Segment_Y_atc[gtx].fill_value
Segment_sigma_geo[gtx].data[Segment_sigma_geo[gtx].mask] = Segment_sigma_geo[gtx].fill_value
Segment_Longitude[gtx].data[Segment_Longitude[gtx].mask] = Segment_Longitude[gtx].fill_value
Segment_Latitude[gtx].data[Segment_Latitude[gtx].mask] = Segment_Latitude[gtx].fill_value
Segment_N_Fit[gtx].data[Segment_N_Fit[gtx].mask] = Segment_N_Fit[gtx].fill_value
Segment_Window[gtx].data[Segment_Window[gtx].mask] = Segment_Window[gtx].fill_value
Segment_RDE[gtx].data[Segment_RDE[gtx].mask] = Segment_RDE[gtx].fill_value
Segment_SNR[gtx].data[Segment_SNR[gtx].mask] = Segment_SNR[gtx].fill_value
Segment_Summary[gtx].data[Segment_Summary[gtx].mask] = Segment_Summary[gtx].fill_value
Segment_Iterations[gtx].data[Segment_Iterations[gtx].mask] = Segment_Iterations[gtx].fill_value
Segment_Source[gtx].data[Segment_Source[gtx].mask] = Segment_Source[gtx].fill_value
Segment_Pulses[gtx].data[Segment_Pulses[gtx].mask] = Segment_Pulses[gtx].fill_value
FPB_mean_corr[gtx].data[FPB_mean_corr[gtx].mask] = FPB_mean_corr[gtx].fill_value
FPB_mean_sigma[gtx].data[FPB_mean_sigma[gtx].mask] = FPB_mean_sigma[gtx].fill_value
FPB_median_corr[gtx].data[FPB_median_corr[gtx].mask] = FPB_median_corr[gtx].fill_value
FPB_median_sigma[gtx].data[FPB_median_sigma[gtx].mask] = FPB_median_sigma[gtx].fill_value
FPB_n_corr[gtx].data[FPB_n_corr[gtx].mask] = FPB_n_corr[gtx].fill_value
FPB_cal_corr[gtx].data[FPB_cal_corr[gtx].mask] = FPB_cal_corr[gtx].fill_value
TPS_mean_corr[gtx].data[TPS_mean_corr[gtx].mask] = TPS_mean_corr[gtx].fill_value
TPS_median_corr[gtx].data[TPS_median_corr[gtx].mask] = TPS_median_corr[gtx].fill_value
#-- save tep and dead time information and statistics
IS2_atl03_fit['ancillary_data'][gtx] = {}
IS2_atl03_attrs['ancillary_data'][gtx] = {}
#-- tep time of day
IS2_atl03_fit['ancillary_data'][gtx]['tep_tod'] = np.array(tep[gtx]['tep_tod'])
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['units'] = "seconds since 2018-01-01"
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['long_name'] = "TEP Time Of Day"
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['standard_name'] = "time"
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['source'] = tep[gtx]['pce']
IS2_atl03_attrs['ancillary_data'][gtx]['tep_tod']['description'] = ("The time of day "
"at of the start of the data within the TEP histogram, in seconds since the "
"ATLAS SDP GPS Epoch. The ATLAS Standard Data Products (SDP) epoch offset is "
"defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
"between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. "
"By adding the offset contained within atlas_sdp_gps_epoch to delta time "
"parameters, the time in gps_seconds relative to the GPS epoch can be computed.")
#-- tep window start
IS2_atl03_fit['ancillary_data'][gtx]['tx_start'] = np.array(tep[gtx]['tx_start'])
IS2_atl03_attrs['ancillary_data'][gtx]['tx_start'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['tx_start']['units'] = "seconds"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_start']['long_name'] = "Start of the TEP Window"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_start']['contentType'] = "auxiliaryInformation"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_start']['source'] = tep[gtx]['pce']
IS2_atl03_attrs['ancillary_data'][gtx]['tx_start']['description'] = ("Starting time for the "
"window centered around the primary TEP arrival for calculating the transmit pulse shape.")
#-- tep window end
IS2_atl03_fit['ancillary_data'][gtx]['tx_end'] = np.array(tep[gtx]['tx_end'])
IS2_atl03_attrs['ancillary_data'][gtx]['tx_end'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['tx_end']['units'] = "seconds"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_end']['long_name'] = "End of the TEP Window"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_end']['contentType'] = "auxiliaryInformation"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_end']['source'] = tep[gtx]['pce']
IS2_atl03_attrs['ancillary_data'][gtx]['tx_end']['description'] = ("Ending time for the "
"window centered around the primary TEP arrival for calculating the transmit pulse shape.")
#-- tep robust dispersion estimator
IS2_atl03_fit['ancillary_data'][gtx]['tx_robust_sprd'] = np.array(tep[gtx]['tx_robust_sprd'])
IS2_atl03_attrs['ancillary_data'][gtx]['tx_robust_sprd'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['tx_robust_sprd']['units'] = "seconds"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_robust_sprd']['long_name'] = "Robust Spread"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_robust_sprd']['contentType'] = "auxiliaryInformation"
IS2_atl03_attrs['ancillary_data'][gtx]['tx_robust_sprd']['source'] = tep[gtx]['pce']
IS2_atl03_attrs['ancillary_data'][gtx]['tx_robust_sprd']['description'] = ("Temporal width of "
"the transmit pulse (sec), calculated from the RDE of the primary TEP waveform")
#-- tep full width at half maximum
IS2_atl03_fit['ancillary_data'][gtx]['sigma_tx'] = np.array(tep[gtx]['sigma_tx'])
IS2_atl03_attrs['ancillary_data'][gtx]['sigma_tx'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['sigma_tx']['units'] = "seconds"
IS2_atl03_attrs['ancillary_data'][gtx]['sigma_tx']['long_name'] = "Duration of Transmit Pulse"
IS2_atl03_attrs['ancillary_data'][gtx]['sigma_tx']['contentType'] = "auxiliaryInformation"
IS2_atl03_attrs['ancillary_data'][gtx]['sigma_tx']['source'] = tep[gtx]['pce']
IS2_atl03_attrs['ancillary_data'][gtx]['sigma_tx']['description'] = ("Temporal duration of "
"the transmit pulse (sec), calculated from the FWHM of the TEP waveform")
#-- mean dead time
IS2_atl03_fit['ancillary_data'][gtx]['t_dead'] = np.array(mean_dead_time[gtx])
IS2_atl03_attrs['ancillary_data'][gtx]['t_dead'] = {}
IS2_atl03_attrs['ancillary_data'][gtx]['t_dead']['units'] = "seconds"
IS2_atl03_attrs['ancillary_data'][gtx]['t_dead']['long_name'] = "Dead-time"
IS2_atl03_attrs['ancillary_data'][gtx]['t_dead']['contentType'] = "auxiliaryInformation"
IS2_atl03_attrs['ancillary_data'][gtx]['t_dead']['source'] = "CAL42"
IS2_atl03_attrs['ancillary_data'][gtx]['t_dead']['description'] = ("Mean dead-time for "
"channels in the detector (sec)")
#-- copy beam variables
IS2_atl03_fit[gtx] = dict(land_ice_segments={})
IS2_atl03_fill[gtx] = dict(land_ice_segments={})
IS2_atl03_attrs[gtx] = dict(land_ice_segments={})
#-- group attributes for beam
IS2_atl03_attrs[gtx]['Description'] = fileID[gtx].attrs['Description']
IS2_atl03_attrs[gtx]['atlas_pce'] = fileID[gtx].attrs['atlas_pce']
IS2_atl03_attrs[gtx]['atlas_beam_type'] = fileID[gtx].attrs['atlas_beam_type']
IS2_atl03_attrs[gtx]['groundtrack_id'] = fileID[gtx].attrs['groundtrack_id']
IS2_atl03_attrs[gtx]['atmosphere_profile'] = fileID[gtx].attrs['atmosphere_profile']
IS2_atl03_attrs[gtx]['atlas_spot_number'] = fileID[gtx].attrs['atlas_spot_number']
IS2_atl03_attrs[gtx]['sc_orientation'] = fileID[gtx].attrs['sc_orientation']
#-- group attributes for land_ice_segments
IS2_atl03_attrs[gtx]['land_ice_segments']['Description'] = ("The land_ice_segments group "
"contains the primary set of derived products. This includes geolocation, height, and "
"standard error and quality measures for each segment. This group is sparse, meaning "
"that parameters are provided only for pairs of segments for which at least one beam "
"has a valid surface-height measurement.")
IS2_atl03_attrs[gtx]['land_ice_segments']['data_rate'] = ("Data within this group are "
"sparse. Data values are provided only for those ICESat-2 20m segments where at "
"least one beam has a valid land ice height measurement.")
#-- geolocation, time and segment ID
#-- delta time
IS2_atl03_fit[gtx]['land_ice_segments']['delta_time'] = Segment_delta_time[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['delta_time'] = Segment_delta_time[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time']['units'] = "seconds since 2018-01-01"
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time']['long_name'] = "Elapsed GPS seconds"
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time']['standard_name'] = "time"
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time']['calendar'] = "standard"
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time']['description'] = ("Number of GPS "
"seconds since the ATLAS SDP epoch. The ATLAS Standard Data Products (SDP) epoch offset "
"is defined within /ancillary_data/atlas_sdp_gps_epoch as the number of GPS seconds "
"between the GPS epoch (1980-01-06T00:00:00.000000Z UTC) and the ATLAS SDP epoch. By "
"adding the offset contained within atlas_sdp_gps_epoch to delta time parameters, the "
"time in gps_seconds relative to the GPS epoch can be computed.")
IS2_atl03_attrs[gtx]['land_ice_segments']['delta_time']['coordinates'] = \
"segment_id latitude longitude"
#-- latitude
IS2_atl03_fit[gtx]['land_ice_segments']['latitude'] = Segment_Latitude[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['latitude'] = Segment_Latitude[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['units'] = "degrees_north"
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['contentType'] = "physicalMeasurement"
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['long_name'] = "Latitude"
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['standard_name'] = "latitude"
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['description'] = ("Latitude of "
"segment center")
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['valid_min'] = -90.0
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['valid_max'] = 90.0
IS2_atl03_attrs[gtx]['land_ice_segments']['latitude']['coordinates'] = \
"segment_id delta_time longitude"
#-- longitude
IS2_atl03_fit[gtx]['land_ice_segments']['longitude'] = Segment_Longitude[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['longitude'] = Segment_Longitude[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['units'] = "degrees_east"
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['contentType'] = "physicalMeasurement"
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['long_name'] = "Longitude"
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['standard_name'] = "longitude"
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['description'] = ("Longitude of "
"segment center")
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['valid_min'] = -180.0
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['valid_max'] = 180.0
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['coordinates'] = \
"segment_id delta_time latitude"
#-- segment ID
IS2_atl03_fit[gtx]['land_ice_segments']['segment_id'] = Segment_ID[gtx][1:]
IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id']['long_name'] = "Along-track segment ID number"
IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id']['description'] = ("A 7 digit number "
"identifying the along-track geolocation segment number. These are sequential, starting with "
"1 for the first segment after an ascending equatorial crossing node. Equal to the segment_id for "
"the second of the two 20m ATL03 segments included in the 40m ATL06 segment")
IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id']['coordinates'] = \
"delta_time latitude longitude"
#-- land ice height corrected for first photon bias and transmit-pulse shape
IS2_atl03_fit[gtx]['land_ice_segments']['h_li'] = Segment_Land_Ice[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['h_li'] = Segment_Land_Ice[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li']['contentType'] = "physicalMeasurement"
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li']['long_name'] = "Land Ice height"
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li']['description'] = ("Standard land-ice segment "
"height determined by land ice algorithm, corrected for first-photon bias, representing the "
"median-based height of the selected PEs")
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li']['coordinates'] = \
"segment_id delta_time latitude longitude"
#-- land ice height errors (max of fit or first photon bias uncertainties)
IS2_atl03_fit[gtx]['land_ice_segments']['h_li_sigma'] = Segment_Land_Ice_Error[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['h_li_sigma'] = Segment_Land_Ice_Error[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li_sigma'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li_sigma']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li_sigma']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li_sigma']['long_name'] = "Expected RMS segment misfit"
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li_sigma']['description'] = ("Propagated error due to "
"sampling error and FPB correction from the land ice algorithm")
IS2_atl03_attrs[gtx]['land_ice_segments']['h_li_sigma']['coordinates'] = \
"segment_id delta_time latitude longitude"
#-- vertical geolocation error due to PPD and POD
IS2_atl03_fit[gtx]['land_ice_segments']['sigma_geo_h'] = Segment_sigma_geo[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['sigma_geo_h'] = Segment_sigma_geo[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['sigma_geo_h'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['sigma_geo_h']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['sigma_geo_h']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['sigma_geo_h']['long_name'] = "Vertical Geolocation Error"
IS2_atl03_attrs[gtx]['land_ice_segments']['sigma_geo_h']['description'] = ("Total vertical geolocation error "
"due to PPD and POD, including the effects of horizontal geolocation error on the segment vertical error.")
IS2_atl03_attrs[gtx]['land_ice_segments']['sigma_geo_h']['coordinates'] = \
"segment_id delta_time latitude longitude"
#-- segment quality summary
IS2_atl03_fit[gtx]['land_ice_segments']['atl06_quality_summary'] = Segment_Summary[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['atl06_quality_summary'] = Segment_Summary[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary']['long_name'] = "ATL06 Quality Summary"
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary']['description'] = ("The ATL06_quality_summary "
"parameter indicates the best-quality subset of all ATL06 data. A zero in this parameter implies that no "
"data-quality tests have found a problem with the segment, a one implies that some potential problem has "
"been found. Users who select only segments with zero values for this flag can be relatively certain of "
"obtaining high-quality data, but will likely miss a significant fraction of usable data, particularly in "
"cloudy, rough, or low-surface-reflectance conditions.")
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary']['flag_meanings'] = \
"best_quality potential_problem"
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['valid_min'] = 0
IS2_atl03_attrs[gtx]['land_ice_segments']['longitude']['valid_max'] = 1
IS2_atl03_attrs[gtx]['land_ice_segments']['atl06_quality_summary']['coordinates'] = \
"segment_id delta_time latitude longitude"
#-- dem variables
IS2_atl03_fit[gtx]['land_ice_segments']['dem'] = {}
IS2_atl03_fill[gtx]['land_ice_segments']['dem'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['dem'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['Description'] = ("The dem group "
"contains the reference digital elevation model and geoid heights.")
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['data_rate'] = ("Data within this group "
"are stored at the land_ice_segments segment rate.")
#-- geoid height
fv = fileID[gtx]['geophys_corr']['geoid'].attrs['_FillValue']
geoid = np.ma.array(fileID[gtx]['geophys_corr']['geoid'][:], fill_value=fv)
geoid.mask = geoid.data == geoid.fill_value
geoid_h = (geoid[1:] + geoid[0:-1])/2.0
geoid_h.data[geoid_h.mask] = geoid_h.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['dem']['geoid_h'] = geoid_h
IS2_atl03_fill[gtx]['land_ice_segments']['dem']['geoid_h'] = geoid_h.fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['long_name'] = "Geoid Height"
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['description'] = ("Geoid height above "
"WGS-84 reference ellipsoid (range -107 to 86m)")
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['source'] = "EGM2008"
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['valid_min'] = -107
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['valid_max'] = 86
IS2_atl03_attrs[gtx]['land_ice_segments']['dem']['geoid_h']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- geophysical variables
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical'] = {}
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['Description'] = ("The geophysical group "
"contains parameters used to correct segment heights for geophysical effects, parameters "
"related to solar background and parameters indicative of the presence or absence of clouds.")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['data_rate'] = ("Data within this group "
"are stored at the land_ice_segments segment rate.")
#-- background rate
bckgrd = (Segment_Background[gtx][1:] + Segment_Background[gtx][0:-1])/2.0
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['bckgrd'] = np.copy(bckgrd)
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['bckgrd'] = None
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bckgrd'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bckgrd']['units'] = "counts / second"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bckgrd']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bckgrd']['long_name'] = ("Background count "
"rate based on the ATLAS 50-shot sum interpolated to the reference photon")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bckgrd']['description'] = ("The background "
"count rate from the 50-shot altimetric histogram after removing the number of likely signal photons")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bckgrd']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- blowing snow PSC flag
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['bsnow_psc'] = \
IS2_atl09_mds[pfl]['high_rate']['bsnow_psc'][1:]
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['bsnow_psc'] = None
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['long_name'] = "Blowing snow PSC flag"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['description'] = ("Indicates the "
"potential for polar stratospheric clouds to affect the blowing snow retrieval, where 0=none and 3=maximum. "
"This flag is a function of month and hemisphere and is only applied poleward of 60 north and south")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['flag_meanings'] = ("none slight "
"moderate maximum_bsnow_PSC_affected")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['flag_values'] = [0,1,2,3]
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['valid_min'] = 0
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['valid_max'] = 3
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_psc']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- blowing snow confidence
#-- copied from the ATL09 high_rate group, dropping the first value ([1:]) so the
#-- length matches the other segment-rate arrays
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['bsnow_conf'] = \
IS2_atl09_mds[pfl]['high_rate']['bsnow_con'][1:]
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['bsnow_conf'] = None
#-- HDF5-style attributes describing the bsnow_conf flag (units, flag meanings, valid range)
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['long_name'] = "Blowing snow confidence"
#-- bug fix: description previously read "2=no surface wind", contradicting
#-- flag_values/flag_meanings below where -2 is no_surface_wind and 2 is weak
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['description'] = ("Indicates the blowing snow "
"confidence, where -3=surface not detected; -2=no surface wind;-1=no scattering layer found; 0=no top layer found; "
"1=none-little; 2=weak; 3=moderate; 4=moderate-high; 5=high; 6=very high")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['flag_meanings'] = ("surface_not_detected "
"no_surface_wind no_scattering_layer_found no_top_layer_found none_little weak moderate moderate_high high very_high")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['flag_values'] = [-3,-2,-1,0,1,2,3,4,5,6]
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['valid_min'] = -3
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['valid_max'] = 6
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_conf']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- blowing snow optical depth
#-- masked array carries the ATL09 _FillValue so invalid values survive the copy
bsnow_od = np.ma.array(IS2_atl09_mds[pfl]['high_rate']['bsnow_od'][1:],
fill_value=IS2_atl09_attrs[pfl]['high_rate']['bsnow_od']['_FillValue'])
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['bsnow_od'] = bsnow_od
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['bsnow_od'] = bsnow_od.fill_value
#-- HDF5-style attributes describing the blowing snow optical depth
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_od'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_od']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_od']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_od']['long_name'] = "Blowing snow OD"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_od']['description'] = "Blowing snow layer optical depth"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['bsnow_od']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- cloud flag ASR
#-- copied from the ATL09 high_rate group, dropping the first value ([1:])
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr'] = \
IS2_atl09_mds[pfl]['high_rate']['cloud_flag_asr'][1:]
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr'] = None
#-- HDF5-style attributes describing the cloud_flag_asr flag (units, flag meanings, valid range)
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['long_name'] = "Cloud Flag ASR"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['description'] = ("Indicates the Cloud "
"flag probability from apparent surface reflectance, where 0=clear with high confidence; 1=clear with medium "
"confidence; 2=clear with low confidence; 3=cloudy with low confidence; 4=cloudy with medium confidence; 5=cloudy "
"with high confidence; 6=unknown")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['flag_meanings'] = ("clear_with_high_confidence "
"clear_with_medium_confidence clear_with_low_confidence cloudy_with_low_confidence cloudy_with_medium_confidence "
"cloudy_with_high_confidence unknown")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['flag_values'] = [0,1,2,3,4,5,6]
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['valid_min'] = 0
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['valid_max'] = 6
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_asr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- cloud flag atm
#-- copied from the ATL09 high_rate group, dropping the first value ([1:])
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm'] = \
IS2_atl09_mds[pfl]['high_rate']['cloud_flag_atm'][1:]
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm'] = None
#-- HDF5-style attributes describing the cloud_flag_atm flag (units, valid range)
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['long_name'] = "Cloud Flag Atm"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['description'] = ("Number of layers found "
"from the backscatter profile using the DDA layer finder")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['flag_values'] = [0,1,2,3,4,5,6,7,8,9,10]
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['valid_min'] = 0
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['valid_max'] = 10
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['cloud_flag_atm']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- multiple scattering warning flag
#-- masked array carries the ATL09 _FillValue so invalid values survive the copy
msw_flag = np.ma.array(IS2_atl09_mds[pfl]['high_rate']['msw_flag'][1:],
fill_value=IS2_atl09_attrs[pfl]['high_rate']['msw_flag']['_FillValue'])
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['msw_flag'] = msw_flag
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['msw_flag'] = msw_flag.fill_value
#-- HDF5-style attributes describing the msw_flag (units, flag meanings, valid range)
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['long_name'] = "Multiple Scattering Warning Flag"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['description'] = ("Combined flag indicating the "
"risks of severe multiple scattering. The multiple scattering warning flag (ATL09 parameter msw_flag) has values from "
"-1 to 5 where zero means no multiple scattering and 5 the greatest. If no layers were detected, then msw_flag = 0. "
"If blowing snow is detected and its estimated optical depth is greater than or equal to 0.5, then msw_flag = 5. "
"If the blowing snow optical depth is less than 0.5, then msw_flag = 4. If no blowing snow is detected but there are "
"cloud or aerosol layers detected, the msw_flag assumes values of 1 to 3 based on the height of the bottom of the "
"lowest layer: < 1 km, msw_flag = 3; 1-3 km, msw_flag = 2; > 3km, msw_flag = 1. A value of -1 indicates that the "
"signal to noise of the data was too low to reliably ascertain the presence of cloud or blowing snow. We expect "
"values of -1 to occur only during daylight.")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['flag_meanings'] = ("cannot_determine "
"no_layers layer_gt_3km layer_between_1_and_3_km layer_lt_1km blow_snow_od_lt_0.5 blow_snow_od_gt_0.5")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['flag_values'] = [-1,0,1,2,3,4,5]
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['valid_min'] = -1
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['valid_max'] = 5
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['msw_flag']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- range bias correction
#-- read from the ATL03 geolocation group, mask fill values, and average adjacent
#-- values ((x[1:]+x[:-1])/2) onto the segment midpoints
fv = fileID[gtx]['geolocation']['range_bias_corr'].attrs['_FillValue']
range_bias_corr = np.ma.array(fileID[gtx]['geolocation']['range_bias_corr'][:], fill_value=fv)
range_bias_corr.mask = range_bias_corr.data == range_bias_corr.fill_value
segment_range_bias = (range_bias_corr[1:] + range_bias_corr[0:-1])/2.0
#-- reset masked entries of the averaged array back to the fill value
segment_range_bias.data[segment_range_bias.mask] = segment_range_bias.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['range_bias_corr'] = segment_range_bias
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['range_bias_corr'] = segment_range_bias.fill_value
#-- HDF5-style attributes describing the range bias correction
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['range_bias_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['range_bias_corr']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['range_bias_corr']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['range_bias_corr']['long_name'] = "Range bias correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['range_bias_corr']['description'] = "The range_bias estimated from geolocation analysis"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['range_bias_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- total neutral atmosphere delay correction
#-- same read/mask/average-to-segment pattern as range_bias_corr above
fv = fileID[gtx]['geolocation']['neutat_delay_total'].attrs['_FillValue']
neutat_delay_total = np.ma.array(fileID[gtx]['geolocation']['neutat_delay_total'][:], fill_value=fv)
neutat_delay_total.mask = neutat_delay_total.data == neutat_delay_total.fill_value
segment_neutat_delay = (neutat_delay_total[1:] + neutat_delay_total[0:-1])/2.0
segment_neutat_delay.data[segment_neutat_delay.mask] = segment_neutat_delay.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['neutat_delay_total'] = segment_neutat_delay
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['neutat_delay_total'] = segment_neutat_delay.fill_value
#-- HDF5-style attributes describing the neutral atmosphere delay correction
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['neutat_delay_total'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['neutat_delay_total']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['neutat_delay_total']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['neutat_delay_total']['long_name'] = "Total Neutral Atmospheric Delay"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['neutat_delay_total']['description'] = "Total neutral atmosphere delay correction (wet+dry)"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['neutat_delay_total']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- solar elevation
#-- read from the ATL03 geolocation group, mask fill values, and average adjacent
#-- values onto the segment midpoints
fv = fileID[gtx]['geolocation']['solar_elevation'].attrs['_FillValue']
solar_elevation = np.ma.array(fileID[gtx]['geolocation']['solar_elevation'][:], fill_value=fv)
solar_elevation.mask = solar_elevation.data == solar_elevation.fill_value
segment_solar_elevation = (solar_elevation[1:] + solar_elevation[0:-1])/2.0
segment_solar_elevation.data[segment_solar_elevation.mask] = segment_solar_elevation.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['solar_elevation'] = segment_solar_elevation
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['solar_elevation'] = segment_solar_elevation.fill_value
#-- HDF5-style attributes describing the solar elevation
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_elevation'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_elevation']['units'] = "degrees"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_elevation']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_elevation']['long_name'] = "Solar elevation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_elevation']['description'] = ("Solar Angle above "
"or below the plane tangent to the ellipsoid surface at the laser spot. Positive values mean the sun is above the "
"horizon, while negative values mean it is below the horizon. The effect of atmospheric refraction is not included.")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_elevation']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- solar azimuth
#-- same read/mask/average-to-segment pattern as solar_elevation above
fv = fileID[gtx]['geolocation']['solar_azimuth'].attrs['_FillValue']
solar_azimuth = np.ma.array(fileID[gtx]['geolocation']['solar_azimuth'][:], fill_value=fv)
solar_azimuth.mask = solar_azimuth.data == solar_azimuth.fill_value
segment_solar_azimuth = (solar_azimuth[1:] + solar_azimuth[0:-1])/2.0
segment_solar_azimuth.data[segment_solar_azimuth.mask] = segment_solar_azimuth.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['solar_azimuth'] = segment_solar_azimuth
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['solar_azimuth'] = segment_solar_azimuth.fill_value
#-- HDF5-style attributes describing the solar azimuth
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_azimuth'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_azimuth']['units'] = "degrees_east"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_azimuth']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_azimuth']['long_name'] = "Solar azimuth"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_azimuth']['description'] = ("The direction, "
"eastwards from north, of the sun vector as seen by an observer at the laser ground spot.")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['solar_azimuth']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- geophysical correction values at segment reference photons
#-- dynamic atmospheric correction
#-- read from the ATL03 geophys_corr group, mask fill values, and average adjacent
#-- values onto the segment midpoints
fv = fileID[gtx]['geophys_corr']['dac'].attrs['_FillValue']
dac = np.ma.array(fileID[gtx]['geophys_corr']['dac'][:], fill_value=fv)
dac.mask = dac.data == dac.fill_value
segment_dac = (dac[1:] + dac[0:-1])/2.0
segment_dac.data[segment_dac.mask] = segment_dac.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['dac'] = segment_dac
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['dac'] = segment_dac.fill_value
#-- HDF5-style attributes describing the dynamic atmospheric correction
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac']['long_name'] = "Dynamic Atmosphere Correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac']['description'] = ("Dynamic Atmospheric Correction "
"(DAC) includes inverted barometer (IB) effect")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac']['source'] = 'Mog2D-G'
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['dac']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- solid earth tide
#-- read from the ATL03 geophys_corr group, mask fill values, and average adjacent
#-- values onto the segment midpoints
fv = fileID[gtx]['geophys_corr']['tide_earth'].attrs['_FillValue']
tide_earth = np.ma.array(fileID[gtx]['geophys_corr']['tide_earth'][:], fill_value=fv)
#-- bug fix: compare against the fill value (was compared against the mask itself,
#-- which -- with the initial scalar False mask -- masked every zero-valued tide
#-- instead of the invalid entries); matches the sibling tide_load/tide_ocean blocks
tide_earth.mask = tide_earth.data == tide_earth.fill_value
segment_earth_tide = (tide_earth[1:] + tide_earth[0:-1])/2.0
#-- reset masked entries of the averaged array back to the fill value
segment_earth_tide.data[segment_earth_tide.mask] = segment_earth_tide.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['tide_earth'] = segment_earth_tide
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['tide_earth'] = segment_earth_tide.fill_value
#-- HDF5-style attributes describing the solid earth tide
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_earth'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_earth']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_earth']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_earth']['long_name'] = "Earth Tide"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_earth']['description'] = "Solid Earth Tide"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_earth']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- load tide
#-- read from the ATL03 geophys_corr group, mask fill values, and average adjacent
#-- values onto the segment midpoints
fv = fileID[gtx]['geophys_corr']['tide_load'].attrs['_FillValue']
tide_load = np.ma.array(fileID[gtx]['geophys_corr']['tide_load'][:], fill_value=fv)
tide_load.mask = tide_load.data == tide_load.fill_value
segment_load_tide = (tide_load[1:] + tide_load[0:-1])/2.0
segment_load_tide.data[segment_load_tide.mask] = segment_load_tide.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['tide_load'] = segment_load_tide
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['tide_load'] = segment_load_tide.fill_value
#-- HDF5-style attributes describing the ocean loading tide
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_load'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_load']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_load']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_load']['long_name'] = "Load Tide"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_load']['description'] = ("Load Tide - Local "
"displacement due to Ocean Loading (-6 to 0 cm)")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_load']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- ocean tide
#-- same read/mask/average-to-segment pattern as tide_load above
fv = fileID[gtx]['geophys_corr']['tide_ocean'].attrs['_FillValue']
tide_ocean = np.ma.array(fileID[gtx]['geophys_corr']['tide_ocean'][:], fill_value=fv)
tide_ocean.mask = tide_ocean.data == tide_ocean.fill_value
segment_ocean_tide = (tide_ocean[1:] + tide_ocean[0:-1])/2.0
segment_ocean_tide.data[segment_ocean_tide.mask] = segment_ocean_tide.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['tide_ocean'] = segment_ocean_tide
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['tide_ocean'] = segment_ocean_tide.fill_value
#-- HDF5-style attributes describing the ocean tide
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_ocean'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_ocean']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_ocean']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_ocean']['long_name'] = "Ocean Tide"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_ocean']['description'] = ("Ocean Tides "
"including diurnal and semi-diurnal (harmonic analysis), and longer period tides (dynamic and "
"self-consistent equilibrium).")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_ocean']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- ocean pole tide
#-- here the mask is passed directly to np.ma.array instead of being assigned
#-- afterwards as in the other tide blocks; the result is equivalent
fv = fileID[gtx]['geophys_corr']['tide_oc_pole'].attrs['_FillValue']
tide_oc_pole = np.ma.array(fileID[gtx]['geophys_corr']['tide_oc_pole'][:],
mask=(fileID[gtx]['geophys_corr']['tide_oc_pole'][:] == fv), fill_value=fv)
segment_oc_pole_tide = (tide_oc_pole[1:] + tide_oc_pole[0:-1])/2.0
segment_oc_pole_tide.data[segment_oc_pole_tide.mask] = segment_oc_pole_tide.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['tide_oc_pole'] = segment_oc_pole_tide
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['tide_oc_pole'] = segment_oc_pole_tide.fill_value
#-- HDF5-style attributes describing the ocean pole tide
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_oc_pole'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_oc_pole']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_oc_pole']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_oc_pole']['long_name'] = "Ocean Pole Tide"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_oc_pole']['description'] = ("Oceanic surface "
"rotational deformation due to polar motion (-2 to 2 mm).")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_oc_pole']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- pole tide
#-- same read/mask/average-to-segment pattern as the other tide blocks
fv = fileID[gtx]['geophys_corr']['tide_pole'].attrs['_FillValue']
tide_pole = np.ma.array(fileID[gtx]['geophys_corr']['tide_pole'][:], fill_value=fv)
tide_pole.mask = tide_pole.data == tide_pole.fill_value
segment_pole_tide = (tide_pole[1:] + tide_pole[0:-1])/2.0
segment_pole_tide.data[segment_pole_tide.mask] = segment_pole_tide.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['geophysical']['tide_pole'] = segment_pole_tide
IS2_atl03_fill[gtx]['land_ice_segments']['geophysical']['tide_pole'] = segment_pole_tide.fill_value
#-- HDF5-style attributes describing the solid earth pole tide
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_pole'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_pole']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_pole']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_pole']['long_name'] = "Solid Earth Pole Tide"
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_pole']['description'] = ("Solid Earth Pole "
"Tide - Rotational deformation due to polar motion (-1.5 to 1.5 cm).")
IS2_atl03_attrs[gtx]['land_ice_segments']['geophysical']['tide_pole']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- bias correction variables
#-- initialize the bias_correction group containers and group-level attributes
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction'] = {}
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['Description'] = ("The bias_correction group "
"contains information about the estimated first-photon bias, and the transmit-pulse-shape bias.")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['data_rate'] = ("Data within this group "
"are stored at the land_ice_segments segment rate.")
#-- mean first photon bias
#-- FPB_mean_corr is a per-beam masked array computed earlier in this function
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr'] = FPB_mean_corr[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr'] = FPB_mean_corr[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr']['long_name'] = "first photon bias mean correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr']['description'] = ("Estimated first-photon-bias "
"(fpb) correction to mean segment height")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- mean first photon bias uncertainty
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma'] = FPB_mean_sigma[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma'] = FPB_mean_sigma[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma']['long_name'] = ("first photon bias "
"mean correction error")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma']['description'] = ("Estimated error in "
"first-photon-bias (fpb) correction for mean segment heights")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_mean_corr_sigma']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- median first photon bias
#-- FPB_median_corr/FPB_median_sigma are per-beam masked arrays computed earlier
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr'] = FPB_median_corr[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr'] = FPB_median_corr[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr']['long_name'] = "first photon bias median correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr']['description'] = ("Estimated first-photon-bias "
"(fpb) correction giving the difference between the mean segment height and the corrected median height")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- median first photon bias correction
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma'] = FPB_median_sigma[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma'] = FPB_median_sigma[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma']['long_name'] = ("first photon bias median "
"correction error")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma']['description'] = ("Estimated error in "
"first-photon-bias (fpb) correction giving the difference between the mean segment height and the corrected median height")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_med_corr_sigma']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- first photon bias corrected number of photons
#-- FPB_n_corr/FPB_cal_corr are per-beam masked arrays computed earlier
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr'] = FPB_n_corr[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr'] = FPB_n_corr[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr']['long_name'] = "corrected number of photons"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr']['description'] = ("Estimated photon count after "
"first-photon-bias correction")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_n_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- CAL-19 first photon bias
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr'] = FPB_cal_corr[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr'] = FPB_cal_corr[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr']['long_name'] = "first photon bias calibrated correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr']['description'] = ("Estimated first-photon-bias "
"(fpb) correction calculated using the ATL03 calibration products")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['fpb_cal_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- mean transmit pulse shape correction
#-- TPS_mean_corr/TPS_median_corr are per-beam masked arrays computed earlier;
#-- the 'source' attribute records the PCE of the transmit-echo-path (tep) used
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr'] = TPS_mean_corr[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr'] = TPS_mean_corr[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr']['long_name'] = "tx shape mean correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr']['description'] = ("Estimate of the difference "
"between the mean of the full-waveform transmit-pulse and the mean of a broadened, truncated waveform consistent "
"with the received pulse")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr']['source'] = tep[gtx]['pce']
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_mean_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- median transmit pulse shape correction
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['tx_med_corr'] = TPS_median_corr[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['tx_med_corr'] = TPS_median_corr[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr']['long_name'] = "tx shape median correction"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr']['description'] = ("Estimate of the difference "
"between the median of the full-waveform transmit-pulse and the median of a broadened, truncated waveform consistent "
"with the received pulse")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr']['source'] = tep[gtx]['pce']
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['tx_med_corr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- difference between the mean and median of fit residuals
#-- Segment_Mean_Median is a per-beam masked array computed earlier
IS2_atl03_fit[gtx]['land_ice_segments']['bias_correction']['med_r_fit'] = Segment_Mean_Median[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['bias_correction']['med_r_fit'] = Segment_Mean_Median[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['med_r_fit'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['med_r_fit']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['med_r_fit']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['med_r_fit']['long_name'] = "mean median residual"
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['med_r_fit']['description'] = ("Difference between "
"uncorrected mean and median of linear fit residuals")
IS2_atl03_attrs[gtx]['land_ice_segments']['bias_correction']['med_r_fit']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- fit statistics variables
#-- initialize the fit_statistics group containers and group-level attributes
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics'] = {}
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics'] = {}
#-- fix: the attribute dictionary was initialized twice on consecutive identical
#-- lines; the duplicate no-op assignment has been removed
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['Description'] = ("The fit_statistics group "
"contains a variety of parameters that might indicate the quality of the fitted segment data. Data in "
"this group are sparse, with dimensions matching the land_ice_segments group.")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['data_rate'] = ("Data within this group "
"are stored at the land_ice_segments segment rate.")
#-- segment fit heights
#-- Segment_Height/Segment_dH_along/Segment_dH_across are per-beam masked arrays
#-- computed earlier by the segment fitting
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['h_mean'] = Segment_Height[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['h_mean'] = Segment_Height[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_mean'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_mean']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_mean']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_mean']['long_name'] = "Height Mean"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_mean']['description'] = ("Mean surface "
"height, not corrected for first-photon bias or pulse truncation")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_mean']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- segment fit along-track slopes
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx'] = Segment_dH_along[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx'] = Segment_dH_along[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx']['units'] = "meters/meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx']['long_name'] = "Along Track Slope"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx']['description'] = ("Along-track slope "
"from along-track segment fit")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- segment fit across-track slopes (attributes continue beyond this chunk)
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy'] = Segment_dH_across[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy'] = Segment_dH_across[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy']['units'] = "meters/meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy']['long_name'] = "Across Track Slope"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy']['description'] = ("Across track slope "
"from segment fits to weak and strong beam; the same slope is reported for both laser beams in each pair")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- segment fit height errors
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean'] = Segment_Height_Error[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean'] = Segment_Height_Error[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean']['long_name'] = "Height Error"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean']['description'] = ("Propagated height "
"error due to PE-height sampling error for height from the along-track fit, not including geolocation-induced error")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['sigma_h_mean']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- segment fit across-track slope errors
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma'] = Segment_dH_along_Error[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma'] = Segment_dH_along_Error[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma']['units'] = "meters/meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma']['long_name'] = "Sigma of Along Track Slope"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma']['description'] = ("Propagated error in "
"the along-track segment slope")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dx_sigma']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- segment fit along-track slope errors
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma'] = Segment_dH_across_Error[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma'] = Segment_dH_across_Error[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma']['units'] = "meters/meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma']['long_name'] = "Sigma of Across Track Slope"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma']['description'] = ("Propagated error in "
"the across-track segment slope calculated from segment fits to weak and strong beam")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['dh_fit_dy_sigma']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- number of photons in fit
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons'] = Segment_N_Fit[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons'] = Segment_N_Fit[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons']['contentType'] = "physicalMeasurement"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons']['long_name'] = "Number of Photons in Fit"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons']['description'] = ("Number of PEs used to "
"determine mean surface height in the iterative surface fit")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_photons']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- size of the window used in the fit
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final'] = Segment_Window[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final'] = Segment_Window[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final']['contentType'] = "physicalMeasurement"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final']['long_name'] = "Surface Window Width"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final']['description'] = ("Width of the surface "
"window, top to bottom")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['w_surface_window_final']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- signal-to-noise ratio
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['snr'] = Segment_SNR[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['snr'] = Segment_SNR[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr']['contentType'] = "physicalMeasurement"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr']['long_name'] = "SNR"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr']['description'] = ("Signal-to-noise "
"ratio in the final refined window")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- segment photon signal-to-noise ratio from photon classifier
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph'] = Segment_Photon_SNR[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph'] = Segment_Photon_SNR[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph']['long_name'] = "Maximum SNR"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph']['description'] = ("Maximum "
"signal-to-noise ratio from the photon event classifier used to normalize the photon weights")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['snr_norm_ph']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- robust dispersion estimator
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd'] = Segment_RDE[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd'] = Segment_RDE[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd']['long_name'] = "Robust Spread"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd']['description'] = ("RDE of misfit "
"between PE heights and the along-track segment fit")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['h_robust_sprd']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- number of iterations for fit
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations'] = Segment_Iterations[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations'] = Segment_Iterations[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations']['long_name'] = "Number of Iterations used in Fit"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations']['description'] = ("Number of Iterations when "
"determining the mean surface height")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_fit_iterations']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- number of photon event clusters
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['n_clusters'] = Segment_Clusters[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['n_clusters'] = Segment_Clusters[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_clusters'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_clusters']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_clusters']['contentType'] = "modelResult"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_clusters']['long_name'] = "Number of Estimated Clusters"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_clusters']['description'] = ("Number of clusters calculated "
"using weighted density-based spatial clustering")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_clusters']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- signal source selection
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source'] = Segment_Source[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source'] = Segment_Source[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['contentType'] = "qualityInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['long_name'] = "Signal Selection Source"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['description'] = ("Indicates the last "
"algorithm attempted to select the signal for fitting. 1=Signal selection succeeded using ATL03 detected PE; 2=Signal "
"selection failed using ATL03 detected PE but succeeded using all flagged ATL03 PE; 3=Signal selection failed using "
"all flagged ATL03 PE, but succeeded using the backup algorithm; 4=All signal-finding strategies failed.")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['flag_values'] = [1,2,3,4]
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['valid_min'] = 1
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['valid_max'] = 4
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['signal_selection_source']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- number potential segment pulses
IS2_atl03_fit[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses'] = Segment_Pulses[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses'] = Segment_Pulses[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses']['units'] = "1"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses']['long_name'] = "Number potential segment pulses"
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses']['description'] = ("The number of pulses "
"potentially included in the segment")
IS2_atl03_attrs[gtx]['land_ice_segments']['fit_statistics']['n_seg_pulses']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- ground track variables
IS2_atl03_fit[gtx]['land_ice_segments']['ground_track'] = {}
IS2_atl03_fill[gtx]['land_ice_segments']['ground_track'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['Description'] = ("The ground_track group "
"contains parameters describing the GT and RGT for each land ice segment, as well as angular "
"information about the beams.")
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['data_rate'] = ("Data within this group "
"are stored at the land_ice_segments segment rate.")
#-- along-track X coordinates of segment fit
IS2_atl03_fit[gtx]['land_ice_segments']['ground_track']['x_atc'] = Segment_X_atc[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['ground_track']['x_atc'] = Segment_X_atc[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_atc'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_atc']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_atc']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_atc']['long_name'] = "X Along Track"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_atc']['description'] = ("The along-track "
"x-coordinate of the segment, measured parallel to the RGT, measured from the ascending node of the equatorial "
"crossing of a given RGT.")
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_atc']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- along-track Y coordinates of segment fit
IS2_atl03_fit[gtx]['land_ice_segments']['ground_track']['y_atc'] = Segment_Y_atc[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['ground_track']['y_atc'] = Segment_Y_atc[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['y_atc'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['y_atc']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['y_atc']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['y_atc']['long_name'] = "Y Along Track"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['y_atc']['description'] = ("The along-track "
"y-coordinate of the segment, relative to the RGT, measured along the perpendicular to the RGT, "
"positive to the right of the RGT.")
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['y_atc']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- along-track X coordinate spread of points used in segment fit
IS2_atl03_fit[gtx]['land_ice_segments']['ground_track']['x_spread'] = Segment_X_spread[gtx]
IS2_atl03_fill[gtx]['land_ice_segments']['ground_track']['x_spread'] = Segment_X_spread[gtx].fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_spread'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_spread']['units'] = "meters"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_spread']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_spread']['long_name'] = "X Along Track Spread"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_spread']['description'] = ("The spread of "
"along-track x-coordinates for points used in the segment fit. Coordinates measured parallel to the "
"RGT, measured from the ascending node of the equatorial crossing of a given RGT.")
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['x_spread']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- elevation
fv = fileID[gtx]['geolocation']['ref_elev'].attrs['_FillValue']
ref_elev = np.ma.array(fileID[gtx]['geolocation']['ref_elev'][:], fill_value=fv)
ref_elev.mask = ref_elev.data == ref_elev.fill_value
segment_ref_elev = (ref_elev[1:] + ref_elev[0:-1])/2.0
segment_ref_elev.data[segment_ref_elev.mask] = segment_ref_elev.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['ground_track']['ref_elev'] = segment_ref_elev
IS2_atl03_fill[gtx]['land_ice_segments']['ground_track']['ref_elev'] = segment_ref_elev.fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_elev'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_elev']['units'] = "radians"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_elev']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_elev']['long_name'] = "Elevation"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_elev']['description'] = ("Elevation of the "
"unit pointing vector for the reference photon in the local ENU frame in radians. The angle is measured "
"from East-North plane and positive towards Up")
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_elev']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- azimuth
fv = fileID[gtx]['geolocation']['ref_azimuth'].attrs['_FillValue']
ref_azimuth = np.ma.array(fileID[gtx]['geolocation']['ref_azimuth'][:], fill_value=fv)
ref_azimuth.mask = ref_azimuth.data == ref_azimuth.fill_value
segment_ref_azimuth = (ref_azimuth[1:] + ref_azimuth[0:-1])/2.0
segment_ref_azimuth.data[segment_ref_azimuth.mask] = segment_ref_azimuth.fill_value
IS2_atl03_fit[gtx]['land_ice_segments']['ground_track']['ref_azimuth'] = segment_ref_azimuth
IS2_atl03_fill[gtx]['land_ice_segments']['ground_track']['ref_azimuth'] = segment_ref_azimuth.fill_value
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_azimuth'] = {}
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_azimuth']['units'] = "radians"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_azimuth']['contentType'] = "referenceInformation"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_azimuth']['long_name'] = "Azimuth"
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_azimuth']['description'] = ("Azimuth of the "
"unit pointing vector for the reference photon in the local ENU frame in radians. The angle is measured "
"from North and positive towards East")
IS2_atl03_attrs[gtx]['land_ice_segments']['ground_track']['ref_azimuth']['coordinates'] = \
"../segment_id ../delta_time ../latitude ../longitude"
#-- parallel h5py I/O does not support compression filters at this time
if (comm.rank == 0):
#-- use default output file name and path
if args.output:
output_file=os.path.expanduser(args.output)
else:
fargs=(SUB,'ATL06',YY,MM,DD,HH,MN,SS,TRK,CYCL,GRAN,RL,VERS,AUX)
file_format='{0}{1}_{2}{3}{4}{5}{6}{7}_{8}{9}{10}_{11}_{12}{13}.h5'
output_file=os.path.join(ATL03_dir,file_format.format(*fargs))
#-- write to HDF5 file
HDF5_ATL03_write(IS2_atl03_fit, IS2_atl03_attrs, COMM=comm,
VERBOSE=args.verbose, INPUT=[args.ATL03,args.ATL09],
FILL_VALUE=IS2_atl03_fill, CLOBBER=True, FILENAME=output_file)
#-- change the permissions level to MODE
os.chmod(output_file, args.mode)
#-- close the input ATL03 file
fileID.close()
#-- PURPOSE: read ICESat-2 ATL09 HDF5 data file for specific variables
def read_HDF5_ATL09(FILENAME, pfl, D, ATTRIBUTES=True, VERBOSE=False, COMM=None):
    """Read atmospheric parameters from an ICESat-2 ATL09 granule and map
    them to the ATL03 segment times with nearest-neighbor interpolation.

    FILENAME: path to the ATL09 HDF5 granule
    pfl: profile reported for the ATLAS strong beams (e.g. 'profile_1')
    D: delta times of the output ATL03 segments
    ATTRIBUTES: also read the HDF5 attributes of the file and variables
    VERBOSE: print the input filename from MPI rank 0
    COMM: MPI communicator for parallel HDF5 access

    Returns (IS2_atl09_mds, IS2_atl09_attrs): data and attribute dicts
    """
    #-- open the ATL09 granule for reading with parallel MPI-IO access
    fid = h5py.File(FILENAME, 'r', driver='mpio', comm=COMM)
    if VERBOSE and (COMM.rank == 0):
        print(FILENAME)
    #-- output python dictionaries for the ATL09 variables and attributes
    IS2_atl09_mds = {pfl: dict(high_rate={})}
    IS2_atl09_attrs = {}
    #-- ATL09 delta_time used to map atmospheric parameters to ATL03 segments
    atl09_time = fid[pfl]['high_rate']['delta_time'][:]
    #-- 25 Hz high-rate atmospheric variables to extract
    high_rate_keys = ['aclr_true','bsnow_con','bsnow_dens','bsnow_h',
        'bsnow_h_dens','bsnow_od','bsnow_psc','cloud_flag_asr','cloud_flag_atm',
        'cloud_fold_flag','column_od_asr','column_od_asr_qf','msw_flag',
        'snow_ice','solar_azimuth','solar_elevation','surf_refl_true']
    #-- number of output ATL03 segments and the indices filled by this rank
    num_segments = len(D)
    rank_indices = np.arange(COMM.rank, num_segments, COMM.size)
    #-- interpolate each variable of interest to the ATL03 segment times
    #-- (each rank only fills its own stride of indices; zeros elsewhere)
    for key in high_rate_keys:
        values = np.copy(fid[pfl]['high_rate'][key][:])
        interpolator = scipy.interpolate.interp1d(atl09_time, values,
            kind='nearest', fill_value='extrapolate')
        IS2_atl09_mds[pfl]['high_rate'][key] = np.zeros(num_segments, dtype=values.dtype)
        IS2_atl09_mds[pfl]['high_rate'][key][rank_indices] = \
            interpolator(D[rank_indices]).astype(values.dtype)
    #-- getting attributes of the profile, each variable, and the file
    if ATTRIBUTES:
        IS2_atl09_attrs[pfl] = dict(high_rate={})
        #-- profile group attributes
        IS2_atl09_attrs[pfl].update(fid[pfl].attrs)
        #-- variable attributes
        for key in high_rate_keys:
            IS2_atl09_attrs[pfl]['high_rate'][key] = dict(fid[pfl]['high_rate'][key].attrs)
        #-- global file attributes
        IS2_atl09_attrs.update(fid.attrs)
    #-- closing the HDF5 file
    fid.close()
    #-- return the datasets and variables
    return (IS2_atl09_mds, IS2_atl09_attrs)
#-- PURPOSE: outputting the reduced and corrected ICESat-2 data to HDF5
def HDF5_ATL03_write(IS2_atl03_data, IS2_atl03_attrs, COMM=None, INPUT=None,
    FILENAME='', FILL_VALUE=None, CLOBBER=True, VERBOSE=False):
    """Write the reduced and corrected ICESat-2 segment data to an
    ATL06-style HDF5 file.

    IS2_atl03_data: dictionary of output segment variables for each beam
    IS2_atl03_attrs: dictionary of HDF5 attributes for each variable
    COMM: MPI communicator (only rank 0 prints the filename when VERBOSE)
    INPUT: list of input granule paths recorded in the file attributes
    FILENAME: full path of the output HDF5 file
    FILL_VALUE: dictionary of invalid (fill) values for each variable
    CLOBBER: overwrite an existing output file ('w' vs 'w-')
    VERBOSE: print the output filename
    """
    #-- setting HDF5 clobber attribute ('w-' raises if the file exists)
    clobber = 'w' if CLOBBER else 'w-'
    #-- open output HDF5 file (serial write: a single rank creates the file)
    fileID = h5py.File(FILENAME, clobber)
    #-- bug fix: guard against the default COMM=None (previously raised
    #-- AttributeError when VERBOSE was set without an MPI communicator)
    if VERBOSE and (COMM is None or COMM.rank == 0):
        print(FILENAME)
    #-- create HDF5 records
    h5 = {}
    #-- information ancillary to the data product
    #-- number of GPS seconds between the GPS epoch (1980-01-06T00:00:00Z UTC)
    #-- and ATLAS Standard Data Product (SDP) epoch (2018-01-01T00:00:00Z UTC)
    h5['ancillary_data'] = {}
    for k in ['atlas_sdp_gps_epoch','data_end_utc','data_start_utc','end_cycle',
        'end_geoseg','end_gpssow','end_gpsweek','end_orbit','end_region',
        'end_rgt','granule_end_utc','granule_start_utc','release','start_cycle',
        'start_geoseg','start_gpssow','start_gpsweek','start_orbit','start_region',
        'start_rgt','version']:
        #-- Defining the HDF5 dataset variables
        v = IS2_atl03_data['ancillary_data'][k]
        val = 'ancillary_data/{0}'.format(k)
        h5['ancillary_data'][k] = fileID.create_dataset(val, np.shape(v), data=v,
            dtype=v.dtype)
        #-- add HDF5 variable attributes
        for att_name,att_val in IS2_atl03_attrs['ancillary_data'][k].items():
            h5['ancillary_data'][k].attrs[att_name] = att_val
    #-- land_ice_segments variable groups for each beam
    GROUPS = ['fit_statistics','geophysical','ground_track','dem','bias_correction']
    #-- write each output beam
    for gtx in ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']:
        fileID.create_group(gtx)
        fileID['ancillary_data'].create_group(gtx)
        #-- add HDF5 group attributes for beam
        for att_name in ['Description','atlas_pce','atlas_beam_type',
            'groundtrack_id','atmosphere_profile','atlas_spot_number',
            'sc_orientation']:
            fileID[gtx].attrs[att_name] = IS2_atl03_attrs[gtx][att_name]
        #-- add transmit pulse shape and dead time parameters
        h5['ancillary_data'][gtx] = {}
        for k,v in IS2_atl03_data['ancillary_data'][gtx].items():
            #-- attributes
            attrs = IS2_atl03_attrs['ancillary_data'][gtx][k]
            #-- Defining the HDF5 dataset variables
            val = 'ancillary_data/{0}/{1}'.format(gtx,k)
            h5['ancillary_data'][gtx][k] = fileID.create_dataset(val,
                np.shape(v), data=v, dtype=v.dtype)
            #-- add HDF5 variable attributes
            for att_name,att_val in attrs.items():
                h5['ancillary_data'][gtx][k].attrs[att_name] = att_val
        #-- create land_ice_segments group
        fileID[gtx].create_group('land_ice_segments')
        h5[gtx] = dict(land_ice_segments={})
        for att_name in ['Description','data_rate']:
            att_val = IS2_atl03_attrs[gtx]['land_ice_segments'][att_name]
            fileID[gtx]['land_ice_segments'].attrs[att_name] = att_val
        #-- segment_id: HDF5 dimension scale for all land_ice_segments variables
        v = IS2_atl03_data[gtx]['land_ice_segments']['segment_id']
        attrs = IS2_atl03_attrs[gtx]['land_ice_segments']['segment_id']
        #-- Defining the HDF5 dataset variables
        val = '{0}/{1}/{2}'.format(gtx,'land_ice_segments','segment_id')
        h5[gtx]['land_ice_segments']['segment_id'] = fileID.create_dataset(val,
            np.shape(v), data=v, dtype=v.dtype, compression='gzip')
        #-- make dimension
        h5[gtx]['land_ice_segments']['segment_id'].make_scale('segment_id')
        #-- add HDF5 variable attributes
        for att_name,att_val in attrs.items():
            h5[gtx]['land_ice_segments']['segment_id'].attrs[att_name] = att_val
        #-- geolocation, time and height variables
        for k in ['latitude','longitude','delta_time','h_li','h_li_sigma',
            'sigma_geo_h','atl06_quality_summary']:
            #-- values and attributes
            v = IS2_atl03_data[gtx]['land_ice_segments'][k]
            attrs = IS2_atl03_attrs[gtx]['land_ice_segments'][k]
            fillvalue = FILL_VALUE[gtx]['land_ice_segments'][k]
            #-- Defining the HDF5 dataset variables
            val = '{0}/{1}/{2}'.format(gtx,'land_ice_segments',k)
            h5[gtx]['land_ice_segments'][k] = fileID.create_dataset(val,
                np.shape(v), data=v, dtype=v.dtype, fillvalue=fillvalue,
                compression='gzip')
            #-- attach dimensions
            for i,dim in enumerate(['segment_id']):
                h5[gtx]['land_ice_segments'][k].dims[i].attach_scale(
                    h5[gtx]['land_ice_segments'][dim])
            #-- add HDF5 variable attributes
            for att_name,att_val in attrs.items():
                h5[gtx]['land_ice_segments'][k].attrs[att_name] = att_val
        #-- fit statistics, geophysical corrections, geolocation and dem
        for key in GROUPS:
            fileID[gtx]['land_ice_segments'].create_group(key)
            h5[gtx]['land_ice_segments'][key] = {}
            for att_name in ['Description','data_rate']:
                att_val = IS2_atl03_attrs[gtx]['land_ice_segments'][key][att_name]
                fileID[gtx]['land_ice_segments'][key].attrs[att_name] = att_val
            for k,v in IS2_atl03_data[gtx]['land_ice_segments'][key].items():
                #-- attributes
                attrs = IS2_atl03_attrs[gtx]['land_ice_segments'][key][k]
                fillvalue = FILL_VALUE[gtx]['land_ice_segments'][key][k]
                #-- Defining the HDF5 dataset variables
                val = '{0}/{1}/{2}/{3}'.format(gtx,'land_ice_segments',key,k)
                #-- NOTE(review): a falsy fill value (0 or None) creates the
                #-- dataset without a fillvalue -- confirm no variable uses 0
                if fillvalue:
                    h5[gtx]['land_ice_segments'][key][k] = \
                        fileID.create_dataset(val, np.shape(v), data=v,
                        dtype=v.dtype, fillvalue=fillvalue, compression='gzip')
                else:
                    h5[gtx]['land_ice_segments'][key][k] = \
                        fileID.create_dataset(val, np.shape(v), data=v,
                        dtype=v.dtype, compression='gzip')
                #-- attach dimensions
                for i,dim in enumerate(['segment_id']):
                    h5[gtx]['land_ice_segments'][key][k].dims[i].attach_scale(
                        h5[gtx]['land_ice_segments'][dim])
                #-- add HDF5 variable attributes
                for att_name,att_val in attrs.items():
                    h5[gtx]['land_ice_segments'][key][k].attrs[att_name] = att_val
    #-- HDF5 file title and global attributes
    fileID.attrs['featureType'] = 'trajectory'
    fileID.attrs['title'] = 'ATLAS/ICESat-2 Land Ice Height'
    fileID.attrs['summary'] = ('Estimates of the ice-sheet mean surface height '
        'relative to the WGS-84 ellipsoid, and ancillary parameters needed to '
        'interpret and assess the quality of these height estimates.')
    fileID.attrs['description'] = ('Land ice surface heights for each beam, '
        'along and across-track slopes calculated for beam pairs. All '
        'parameters are calculated for the same along-track increments for '
        'each beam and repeat.')
    date_created = datetime.datetime.today()
    fileID.attrs['date_created'] = date_created.isoformat()
    project = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'
    fileID.attrs['project'] = project
    platform = 'ICESat-2 > Ice, Cloud, and land Elevation Satellite-2'
    #-- bug fix: previously wrote to fileID.attrs['project'] a second time,
    #-- clobbering it and never recording the 'platform' attribute
    fileID.attrs['platform'] = platform
    #-- add attribute for elevation instrument and designated processing level
    instrument = 'ATLAS > Advanced Topographic Laser Altimeter System'
    fileID.attrs['instrument'] = instrument
    fileID.attrs['source'] = 'Spacecraft'
    fileID.attrs['references'] = 'https://nsidc.org/data/icesat-2'
    fileID.attrs['processing_level'] = '4'
    #-- add attributes for input ATL03 and ATL09 files
    fileID.attrs['input_files'] = ','.join([os.path.basename(i) for i in INPUT])
    #-- find geospatial and temporal ranges over all beams
    lnmn,lnmx,ltmn,ltmx,tmn,tmx = (np.inf,-np.inf,np.inf,-np.inf,np.inf,-np.inf)
    for gtx in ['gt1l','gt1r','gt2l','gt2r','gt3l','gt3r']:
        lon = IS2_atl03_data[gtx]['land_ice_segments']['longitude']
        lat = IS2_atl03_data[gtx]['land_ice_segments']['latitude']
        delta_time = IS2_atl03_data[gtx]['land_ice_segments']['delta_time']
        #-- setting the geospatial and temporal ranges
        lnmn = min(lnmn, lon.min())
        lnmx = max(lnmx, lon.max())
        ltmn = min(ltmn, lat.min())
        ltmx = max(ltmx, lat.max())
        tmn = min(tmn, delta_time.min())
        tmx = max(tmx, delta_time.max())
    #-- add geospatial and temporal attributes
    fileID.attrs['geospatial_lat_min'] = ltmn
    fileID.attrs['geospatial_lat_max'] = ltmx
    fileID.attrs['geospatial_lon_min'] = lnmn
    fileID.attrs['geospatial_lon_max'] = lnmx
    fileID.attrs['geospatial_lat_units'] = "degrees_north"
    fileID.attrs['geospatial_lon_units'] = "degrees_east"
    fileID.attrs['geospatial_ellipsoid'] = "WGS84"
    fileID.attrs['date_type'] = 'UTC'
    fileID.attrs['time_type'] = 'CCSDS UTC-A'
    #-- convert start and end time from ATLAS SDP seconds into UTC time
    time_utc = convert_delta_time(np.array([tmn,tmx]))
    #-- convert to calendar date
    YY,MM,DD,HH,MN,SS = icesat2_toolkit.time.convert_julian(time_utc['julian'],
        FORMAT='tuple')
    #-- add attributes with measurement date start, end and duration
    tcs = datetime.datetime(int(YY[0]), int(MM[0]), int(DD[0]),
        int(HH[0]), int(MN[0]), int(SS[0]), int(1e6*(SS[0] % 1)))
    fileID.attrs['time_coverage_start'] = tcs.isoformat()
    tce = datetime.datetime(int(YY[1]), int(MM[1]), int(DD[1]),
        int(HH[1]), int(MN[1]), int(SS[1]), int(1e6*(SS[1] % 1)))
    fileID.attrs['time_coverage_end'] = tce.isoformat()
    fileID.attrs['time_coverage_duration'] = '{0:0.0f}'.format(tmx-tmn)
    #-- Closing the HDF5 file
    fileID.close()
#-- run main program only when this file is executed as a script
#-- (main() is defined earlier in the file; importing this module has no side effects)
if __name__ == '__main__':
    main()
| [
"numpy.sqrt",
"re.compile",
"numpy.count_nonzero",
"numpy.array",
"datetime.datetime.today",
"numpy.arange",
"numpy.mean",
"argparse.ArgumentParser",
"os.chmod",
"numpy.max",
"os.getpid",
"os.path.expanduser",
"numpy.abs",
"numpy.ones",
"numpy.ma.array",
"numpy.ma.zeros",
"re.match",... | [((3996, 4152), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Read ICESat-2 ATL03 and ATL09 data files to calculate\n average segment surfaces\n """'}), '(description=\n """Read ICESat-2 ATL03 and ATL09 data files to calculate\n average segment surfaces\n """\n )\n', (4019, 4152), False, 'import argparse\n'), ((5455, 5482), 'os.path.dirname', 'os.path.dirname', (['args.ATL03'], {}), '(args.ATL03)\n', (5470, 5482), False, 'import os\n'), ((5575, 5721), 're.compile', 're.compile', (['"""(processed)?(ATL\\\\d+)_(\\\\d{4})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})_(\\\\d{4})(\\\\d{2})(\\\\d{2})_(\\\\d{3})_(\\\\d{2})(.*?).h5$"""'], {}), "(\n '(processed)?(ATL\\\\d+)_(\\\\d{4})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})(\\\\d{2})_(\\\\d{4})(\\\\d{2})(\\\\d{2})_(\\\\d{3})_(\\\\d{2})(.*?).h5$'\n )\n", (5585, 5721), False, 'import re\n'), ((6133, 6185), 'h5py.File', 'h5py.File', (['args.ATL03', '"""r"""'], {'driver': '"""mpio"""', 'comm': 'comm'}), "(args.ATL03, 'r', driver='mpio', comm=comm)\n", (6142, 6185), False, 'import h5py\n'), ((155318, 155368), 'h5py.File', 'h5py.File', (['FILENAME', '"""r"""'], {'driver': '"""mpio"""', 'comm': 'COMM'}), "(FILENAME, 'r', driver='mpio', comm=COMM)\n", (155327, 155368), False, 'import h5py\n'), ((156276, 156314), 'numpy.arange', 'np.arange', (['COMM.rank', 'n_seg', 'COMM.size'], {}), '(COMM.rank, n_seg, COMM.size)\n', (156285, 156314), True, 'import numpy as np\n'), ((158007, 158035), 'h5py.File', 'h5py.File', (['FILENAME', 'clobber'], {}), '(FILENAME, clobber)\n', (158016, 158035), False, 'import h5py\n'), ((166038, 166063), 'datetime.datetime.today', 'datetime.datetime.today', ([], {}), '()\n', (166061, 166063), False, 'import datetime\n'), ((9699, 9771), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['tide_ocean'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['tide_ocean'][:], fill_value=fv)\n", (9710, 9771), True, 'import numpy as np\n'), 
((10989, 11041), 'numpy.copy', 'np.copy', (["fileID[tep1][pce][tep2]['tep_hist_time'][:]"], {}), "(fileID[tep1][pce][tep2]['tep_hist_time'][:])\n", (10996, 11041), True, 'import numpy as np\n'), ((11061, 11108), 'numpy.copy', 'np.copy', (["fileID[tep1][pce][tep2]['tep_hist'][:]"], {}), "(fileID[tep1][pce][tep2]['tep_hist'][:])\n", (11068, 11108), True, 'import numpy as np\n'), ((11760, 11786), 'numpy.mean', 'np.mean', (['channel_dead_time'], {}), '(channel_dead_time)\n', (11767, 11786), True, 'import numpy as np\n'), ((12265, 12283), 'numpy.shape', 'np.shape', (['fpb_corr'], {}), '(fpb_corr)\n', (12273, 12283), True, 'import numpy as np\n'), ((12313, 12331), 'numpy.zeros', 'np.zeros', (['(ns, nw)'], {}), '((ns, nw))\n', (12321, 12331), True, 'import numpy as np\n'), ((12996, 13037), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (13007, 13037), True, 'import numpy as np\n'), ((13077, 13103), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (13084, 13103), True, 'import numpy as np\n'), ((13166, 13207), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (13177, 13207), True, 'import numpy as np\n'), ((13243, 13269), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (13250, 13269), True, 'import numpy as np\n'), ((13387, 13428), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (13398, 13428), True, 'import numpy as np\n'), ((13466, 13492), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (13473, 13492), True, 'import numpy as np\n'), ((13568, 13609), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (13579, 13609), True, 'import numpy as np\n'), ((13647, 13673), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), 
'(n_seg, dtype=bool)\n', (13654, 13673), True, 'import numpy as np\n'), ((13748, 13789), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (13759, 13789), True, 'import numpy as np\n'), ((13831, 13857), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (13838, 13857), True, 'import numpy as np\n'), ((13979, 14020), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (13990, 14020), True, 'import numpy as np\n'), ((14064, 14090), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (14071, 14090), True, 'import numpy as np\n'), ((14178, 14219), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (14189, 14219), True, 'import numpy as np\n'), ((14263, 14289), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (14270, 14289), True, 'import numpy as np\n'), ((14409, 14450), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (14420, 14450), True, 'import numpy as np\n'), ((14491, 14517), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (14498, 14517), True, 'import numpy as np\n'), ((14600, 14641), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (14611, 14641), True, 'import numpy as np\n'), ((14676, 14702), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (14683, 14702), True, 'import numpy as np\n'), ((14809, 14850), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (14820, 14850), True, 'import numpy as np\n'), ((14888, 14914), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (14895, 14914), True, 'import numpy as 
np\n'), ((14997, 15038), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (15008, 15038), True, 'import numpy as np\n'), ((15073, 15099), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (15080, 15099), True, 'import numpy as np\n'), ((15170, 15211), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (15181, 15211), True, 'import numpy as np\n'), ((15250, 15276), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (15257, 15276), True, 'import numpy as np\n'), ((15345, 15386), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (15356, 15386), True, 'import numpy as np\n'), ((15424, 15450), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (15431, 15450), True, 'import numpy as np\n'), ((15517, 15561), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (15528, 15561), True, 'import numpy as np\n'), ((15595, 15621), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (15602, 15621), True, 'import numpy as np\n'), ((15699, 15740), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (15710, 15740), True, 'import numpy as np\n'), ((15776, 15802), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (15783, 15802), True, 'import numpy as np\n'), ((15870, 15911), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (15881, 15911), True, 'import numpy as np\n'), ((15944, 15970), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (15951, 15970), True, 'import numpy as np\n'), ((16032, 16073), 'numpy.ma.zeros', 
'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (16043, 16073), True, 'import numpy as np\n'), ((16106, 16132), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (16113, 16132), True, 'import numpy as np\n'), ((16232, 16275), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(0)', 'dtype': 'int'}), '(n_seg, fill_value=0, dtype=int)\n', (16243, 16275), True, 'import numpy as np\n'), ((16314, 16340), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (16321, 16340), True, 'import numpy as np\n'), ((16408, 16452), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (16419, 16452), True, 'import numpy as np\n'), ((16488, 16514), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (16495, 16514), True, 'import numpy as np\n'), ((16590, 16634), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (16601, 16634), True, 'import numpy as np\n'), ((16673, 16699), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (16680, 16699), True, 'import numpy as np\n'), ((16781, 16824), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(0)', 'dtype': 'int'}), '(n_seg, fill_value=0, dtype=int)\n', (16792, 16824), True, 'import numpy as np\n'), ((16861, 16887), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (16868, 16887), True, 'import numpy as np\n'), ((16954, 16997), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(4)', 'dtype': 'int'}), '(n_seg, fill_value=4, dtype=int)\n', (16965, 16997), True, 'import numpy as np\n'), ((17032, 17058), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (17039, 17058), True, 'import numpy as np\n'), ((17129, 17173), 'numpy.ma.zeros', 
'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (17140, 17173), True, 'import numpy as np\n'), ((17208, 17234), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (17215, 17234), True, 'import numpy as np\n'), ((17312, 17353), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (17323, 17353), True, 'import numpy as np\n'), ((17396, 17422), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (17403, 17422), True, 'import numpy as np\n'), ((17461, 17502), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (17472, 17502), True, 'import numpy as np\n'), ((17546, 17572), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (17553, 17572), True, 'import numpy as np\n'), ((17612, 17653), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (17623, 17653), True, 'import numpy as np\n'), ((17698, 17724), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (17705, 17724), True, 'import numpy as np\n'), ((17765, 17806), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (17776, 17806), True, 'import numpy as np\n'), ((17852, 17878), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (17859, 17878), True, 'import numpy as np\n'), ((17913, 17957), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (17924, 17957), True, 'import numpy as np\n'), ((17996, 18022), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (18003, 18022), True, 'import numpy as np\n'), ((18059, 18100), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 
'fill_value'}), '(n_seg, fill_value=fill_value)\n', (18070, 18100), True, 'import numpy as np\n'), ((18142, 18168), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (18149, 18168), True, 'import numpy as np\n'), ((18254, 18295), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (18265, 18295), True, 'import numpy as np\n'), ((18338, 18364), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (18345, 18364), True, 'import numpy as np\n'), ((18404, 18445), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (18415, 18445), True, 'import numpy as np\n'), ((18490, 18516), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (18497, 18516), True, 'import numpy as np\n'), ((19538, 19582), 'numpy.unique', 'np.unique', (['pce_mframe_cnt'], {'return_index': '(True)'}), '(pce_mframe_cnt, return_index=True)\n', (19547, 19582), True, 'import numpy as np\n'), ((19868, 19900), 'numpy.zeros', 'np.zeros', (['n_pe'], {'dtype': 'np.float64'}), '(n_pe, dtype=np.float64)\n', (19876, 19900), True, 'import numpy as np\n'), ((20967, 20999), 'numpy.zeros', 'np.zeros', (['n_pe'], {'dtype': 'np.float64'}), '(n_pe, dtype=np.float64)\n', (20975, 20999), True, 'import numpy as np\n'), ((21429, 21520), 'numpy.nonzero', 'np.nonzero', (['((Segment_Index_begin[gtx][:-1] >= 0) & (Segment_Index_begin[gtx][1:] >= 0))'], {}), '((Segment_Index_begin[gtx][:-1] >= 0) & (Segment_Index_begin[gtx]\n [1:] >= 0))\n', (21439, 21520), True, 'import numpy as np\n'), ((47979, 48020), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (47990, 48020), True, 'import numpy as np\n'), ((48061, 48087), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (48068, 48087), True, 'import numpy as np\n'), ((48490, 48531), 
'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (48501, 48531), True, 'import numpy as np\n'), ((48568, 48594), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (48575, 48594), True, 'import numpy as np\n'), ((49032, 49073), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (49043, 49073), True, 'import numpy as np\n'), ((49112, 49138), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (49119, 49138), True, 'import numpy as np\n'), ((49544, 49585), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (49555, 49585), True, 'import numpy as np\n'), ((49624, 49650), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (49631, 49650), True, 'import numpy as np\n'), ((50055, 50096), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (50066, 50096), True, 'import numpy as np\n'), ((50139, 50165), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (50146, 50165), True, 'import numpy as np\n'), ((50637, 50678), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (50648, 50678), True, 'import numpy as np\n'), ((50723, 50749), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (50730, 50749), True, 'import numpy as np\n'), ((51197, 51238), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (51208, 51238), True, 'import numpy as np\n'), ((51283, 51309), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (51290, 51309), True, 'import numpy as np\n'), ((51789, 51830), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 
'fill_value'}), '(n_seg, fill_value=fill_value)\n', (51800, 51830), True, 'import numpy as np\n'), ((51872, 51898), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (51879, 51898), True, 'import numpy as np\n'), ((52326, 52367), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (52337, 52367), True, 'import numpy as np\n'), ((52403, 52429), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (52410, 52429), True, 'import numpy as np\n'), ((52851, 52892), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (52862, 52892), True, 'import numpy as np\n'), ((52931, 52957), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (52938, 52957), True, 'import numpy as np\n'), ((53370, 53411), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (53381, 53411), True, 'import numpy as np\n'), ((53447, 53473), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (53454, 53473), True, 'import numpy as np\n'), ((53859, 53900), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (53870, 53900), True, 'import numpy as np\n'), ((53940, 53966), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (53947, 53966), True, 'import numpy as np\n'), ((54370, 54411), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (54381, 54411), True, 'import numpy as np\n'), ((54450, 54476), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (54457, 54476), True, 'import numpy as np\n'), ((54873, 54917), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', 
(54884, 54917), True, 'import numpy as np\n'), ((54952, 54978), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (54959, 54978), True, 'import numpy as np\n'), ((55365, 55406), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (55376, 55406), True, 'import numpy as np\n'), ((55443, 55469), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (55450, 55469), True, 'import numpy as np\n'), ((55857, 55898), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (55868, 55898), True, 'import numpy as np\n'), ((55932, 55958), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (55939, 55958), True, 'import numpy as np\n'), ((56325, 56366), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (56336, 56366), True, 'import numpy as np\n'), ((56400, 56426), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (56407, 56426), True, 'import numpy as np\n'), ((56836, 56879), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(0)', 'dtype': 'int'}), '(n_seg, fill_value=0, dtype=int)\n', (56847, 56879), True, 'import numpy as np\n'), ((56919, 56945), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (56926, 56945), True, 'import numpy as np\n'), ((57347, 57391), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (57358, 57391), True, 'import numpy as np\n'), ((57428, 57454), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (57435, 57454), True, 'import numpy as np\n'), ((57849, 57893), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (57860, 57893), True, 'import 
numpy as np\n'), ((57933, 57959), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (57940, 57959), True, 'import numpy as np\n'), ((58370, 58413), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(0)', 'dtype': 'int'}), '(n_seg, fill_value=0, dtype=int)\n', (58381, 58413), True, 'import numpy as np\n'), ((58451, 58477), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (58458, 58477), True, 'import numpy as np\n'), ((58868, 58911), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(4)', 'dtype': 'int'}), '(n_seg, fill_value=4, dtype=int)\n', (58879, 58911), True, 'import numpy as np\n'), ((58947, 58973), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (58954, 58973), True, 'import numpy as np\n'), ((59358, 59402), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (59369, 59402), True, 'import numpy as np\n'), ((59438, 59464), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (59445, 59464), True, 'import numpy as np\n'), ((59848, 59889), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (59859, 59889), True, 'import numpy as np\n'), ((59925, 59951), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (59932, 59951), True, 'import numpy as np\n'), ((60321, 60362), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (60332, 60362), True, 'import numpy as np\n'), ((60399, 60425), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (60406, 60425), True, 'import numpy as np\n'), ((60801, 60842), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (60812, 60842), True, 'import numpy as np\n'), ((60880, 
60906), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (60887, 60906), True, 'import numpy as np\n'), ((61288, 61329), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (61299, 61329), True, 'import numpy as np\n'), ((61368, 61394), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (61375, 61394), True, 'import numpy as np\n'), ((61775, 61819), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': '(-1)', 'dtype': 'int'}), '(n_seg, fill_value=-1, dtype=int)\n', (61786, 61819), True, 'import numpy as np\n'), ((61851, 61877), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (61858, 61877), True, 'import numpy as np\n'), ((62224, 62265), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (62235, 62265), True, 'import numpy as np\n'), ((62300, 62326), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (62307, 62326), True, 'import numpy as np\n'), ((62738, 62779), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (62749, 62779), True, 'import numpy as np\n'), ((62815, 62841), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (62822, 62841), True, 'import numpy as np\n'), ((63212, 63253), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (63223, 63253), True, 'import numpy as np\n'), ((63291, 63317), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (63298, 63317), True, 'import numpy as np\n'), ((66407, 66448), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (66418, 66448), True, 'import numpy as np\n'), ((66487, 66513), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 
'bool'}), '(n_seg, dtype=bool)\n', (66494, 66513), True, 'import numpy as np\n'), ((66603, 66644), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (66614, 66644), True, 'import numpy as np\n'), ((66689, 66715), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (66696, 66715), True, 'import numpy as np\n'), ((66817, 66858), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (66828, 66858), True, 'import numpy as np\n'), ((66897, 66923), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (66904, 66923), True, 'import numpy as np\n'), ((67103, 67194), 'numpy.nonzero', 'np.nonzero', (['((Segment_Index_begin[gtx][:-1] >= 0) & (Segment_Index_begin[gtx][1:] >= 0))'], {}), '((Segment_Index_begin[gtx][:-1] >= 0) & (Segment_Index_begin[gtx]\n [1:] >= 0))\n', (67113, 67194), True, 'import numpy as np\n'), ((69081, 69122), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (69092, 69122), True, 'import numpy as np\n'), ((69162, 69188), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (69169, 69188), True, 'import numpy as np\n'), ((69613, 69654), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (69624, 69654), True, 'import numpy as np\n'), ((69700, 69726), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (69707, 69726), True, 'import numpy as np\n'), ((70194, 70235), 'numpy.ma.zeros', 'np.ma.zeros', (['n_seg'], {'fill_value': 'fill_value'}), '(n_seg, fill_value=fill_value)\n', (70205, 70235), True, 'import numpy as np\n'), ((70275, 70301), 'numpy.ones', 'np.ones', (['n_seg'], {'dtype': 'bool'}), '(n_seg, dtype=bool)\n', (70282, 70301), True, 'import numpy as np\n'), ((74144, 74173), 'numpy.array', 
'np.array', (["tep[gtx]['tep_tod']"], {}), "(tep[gtx]['tep_tod'])\n", (74152, 74173), True, 'import numpy as np\n'), ((75330, 75360), 'numpy.array', 'np.array', (["tep[gtx]['tx_start']"], {}), "(tep[gtx]['tx_start'])\n", (75338, 75360), True, 'import numpy as np\n'), ((76081, 76109), 'numpy.array', 'np.array', (["tep[gtx]['tx_end']"], {}), "(tep[gtx]['tx_end'])\n", (76089, 76109), True, 'import numpy as np\n'), ((76839, 76875), 'numpy.array', 'np.array', (["tep[gtx]['tx_robust_sprd']"], {}), "(tep[gtx]['tx_robust_sprd'])\n", (76847, 76875), True, 'import numpy as np\n'), ((77625, 77655), 'numpy.array', 'np.array', (["tep[gtx]['sigma_tx']"], {}), "(tep[gtx]['sigma_tx'])\n", (77633, 77655), True, 'import numpy as np\n'), ((78360, 78389), 'numpy.array', 'np.array', (['mean_dead_time[gtx]'], {}), '(mean_dead_time[gtx])\n', (78368, 78389), True, 'import numpy as np\n'), ((90434, 90501), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['geoid'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['geoid'][:], fill_value=fv)\n", (90445, 90501), True, 'import numpy as np\n'), ((92696, 92711), 'numpy.copy', 'np.copy', (['bckgrd'], {}), '(bckgrd)\n', (92703, 92711), True, 'import numpy as np\n'), ((97106, 97243), 'numpy.ma.array', 'np.ma.array', (["IS2_atl09_mds[pfl]['high_rate']['bsnow_od'][1:]"], {'fill_value': "IS2_atl09_attrs[pfl]['high_rate']['bsnow_od']['_FillValue']"}), "(IS2_atl09_mds[pfl]['high_rate']['bsnow_od'][1:], fill_value=\n IS2_atl09_attrs[pfl]['high_rate']['bsnow_od']['_FillValue'])\n", (97117, 97243), True, 'import numpy as np\n'), ((101458, 101595), 'numpy.ma.array', 'np.ma.array', (["IS2_atl09_mds[pfl]['high_rate']['msw_flag'][1:]"], {'fill_value': "IS2_atl09_attrs[pfl]['high_rate']['msw_flag']['_FillValue']"}), "(IS2_atl09_mds[pfl]['high_rate']['msw_flag'][1:], fill_value=\n IS2_atl09_attrs[pfl]['high_rate']['msw_flag']['_FillValue'])\n", (101469, 101595), True, 'import numpy as np\n'), ((104127, 104203), 'numpy.ma.array', 
'np.ma.array', (["fileID[gtx]['geolocation']['range_bias_corr'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geolocation']['range_bias_corr'][:], fill_value=fv)\n", (104138, 104203), True, 'import numpy as np\n'), ((105601, 105680), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geolocation']['neutat_delay_total'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geolocation']['neutat_delay_total'][:], fill_value=fv)\n", (105612, 105680), True, 'import numpy as np\n'), ((107108, 107184), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geolocation']['solar_elevation'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geolocation']['solar_elevation'][:], fill_value=fv)\n", (107119, 107184), True, 'import numpy as np\n'), ((108796, 108870), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geolocation']['solar_azimuth'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geolocation']['solar_azimuth'][:], fill_value=fv)\n", (108807, 108870), True, 'import numpy as np\n'), ((110360, 110425), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['dac'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['dac'][:], fill_value=fv)\n", (110371, 110425), True, 'import numpy as np\n'), ((111730, 111802), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['tide_earth'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['tide_earth'][:], fill_value=fv)\n", (111741, 111802), True, 'import numpy as np\n'), ((113035, 113106), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['tide_load'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['tide_load'][:], fill_value=fv)\n", (113046, 113106), True, 'import numpy as np\n'), ((114393, 114465), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['tide_ocean'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['tide_ocean'][:], fill_value=fv)\n", (114404, 114465), True, 'import numpy as np\n'), ((115866, 116004), 'numpy.ma.array', 'np.ma.array', 
(["fileID[gtx]['geophys_corr']['tide_oc_pole'][:]"], {'mask': "(fileID[gtx]['geophys_corr']['tide_oc_pole'][:] == fv)", 'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['tide_oc_pole'][:], mask=fileID[gtx\n ]['geophys_corr']['tide_oc_pole'][:] == fv, fill_value=fv)\n", (115877, 116004), True, 'import numpy as np\n'), ((117295, 117366), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geophys_corr']['tide_pole'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geophys_corr']['tide_pole'][:], fill_value=fv)\n", (117306, 117366), True, 'import numpy as np\n'), ((151400, 151469), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geolocation']['ref_elev'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geolocation']['ref_elev'][:], fill_value=fv)\n", (151411, 151469), True, 'import numpy as np\n'), ((152861, 152933), 'numpy.ma.array', 'np.ma.array', (["fileID[gtx]['geolocation']['ref_azimuth'][:]"], {'fill_value': 'fv'}), "(fileID[gtx]['geolocation']['ref_azimuth'][:], fill_value=fv)\n", (152872, 152933), True, 'import numpy as np\n'), ((155025, 155057), 'os.chmod', 'os.chmod', (['output_file', 'args.mode'], {}), '(output_file, args.mode)\n', (155033, 155057), False, 'import os\n'), ((156422, 156463), 'numpy.copy', 'np.copy', (["fileID[pfl]['high_rate'][key][:]"], {}), "(fileID[pfl]['high_rate'][key][:])\n", (156429, 156463), True, 'import numpy as np\n'), ((156624, 156656), 'numpy.zeros', 'np.zeros', (['n_seg'], {'dtype': 'val.dtype'}), '(n_seg, dtype=val.dtype)\n', (156632, 156656), True, 'import numpy as np\n'), ((168228, 168248), 'numpy.array', 'np.array', (['[tmn, tmx]'], {}), '([tmn, tmx])\n', (168236, 168248), True, 'import numpy as np\n'), ((3737, 3748), 'os.getpid', 'os.getpid', ([], {}), '()\n', (3746, 3748), False, 'import os\n'), ((20380, 20508), 'numpy.nonzero', 'np.nonzero', (['((photon_mframes >= unique_major_frames[iteration] - 1) & (photon_mframes <=\n unique_major_frames[iteration] + 1))'], {}), '((photon_mframes >= unique_major_frames[iteration] - 1) & (\n 
photon_mframes <= unique_major_frames[iteration] + 1))\n', (20390, 20508), True, 'import numpy as np\n'), ((20605, 20669), 'numpy.nonzero', 'np.nonzero', (['(photon_mframes[i1] == unique_major_frames[iteration])'], {}), '(photon_mframes[i1] == unique_major_frames[iteration])\n', (20615, 20669), True, 'import numpy as np\n'), ((20759, 20884), 'yapc.classify_photons.classify_photons', 'classify_photons', (['x_atc[i1]', 'h_ph[i1]', 'h_win_width', 'i2'], {'K': '(5)', 'MIN_PH': '(5)', 'MIN_XSPREAD': '(1.0)', 'MIN_HSPREAD': '(0.01)', 'METHOD': '"""linear"""'}), "(x_atc[i1], h_ph[i1], h_win_width, i2, K=5, MIN_PH=5,\n MIN_XSPREAD=1.0, MIN_HSPREAD=0.01, METHOD='linear')\n", (20775, 20884), False, 'from yapc.classify_photons import classify_photons\n'), ((154452, 154483), 'os.path.expanduser', 'os.path.expanduser', (['args.output'], {}), '(args.output)\n', (154470, 154483), False, 'import os\n'), ((159535, 159546), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (159543, 159546), True, 'import numpy as np\n'), ((161852, 161863), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (161860, 161863), True, 'import numpy as np\n'), ((166787, 166806), 'os.path.basename', 'os.path.basename', (['i'], {}), '(i)\n', (166803, 166806), False, 'import os\n'), ((3685, 3697), 'os.getppid', 'os.getppid', ([], {}), '()\n', (3695, 3697), False, 'import os\n'), ((6306, 6330), 're.match', 're.match', (['"""gt\\\\d[lr]"""', 'k'], {}), "('gt\\\\d[lr]', k)\n", (6314, 6330), False, 'import re\n'), ((22434, 22494), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['delta_time'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['delta_time'][idx:idx + cnt])\n", (22441, 22494), True, 'import numpy as np\n'), ((22599, 22627), 'numpy.copy', 'np.copy', (['h_ph[idx:idx + cnt]'], {}), '(h_ph[idx:idx + cnt])\n', (22606, 22627), True, 'import numpy as np\n'), ((22903, 22959), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['lat_ph'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['lat_ph'][idx:idx + 
cnt])\n", (22910, 22959), True, 'import numpy as np\n'), ((22989, 23045), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['lon_ph'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['lon_ph'][idx:idx + cnt])\n", (22996, 23045), True, 'import numpy as np\n'), ((23133, 23196), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['ph_id_channel'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['ph_id_channel'][idx:idx + cnt])\n", (23140, 23196), True, 'import numpy as np\n'), ((23222, 23283), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['ph_id_pulse'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['ph_id_pulse'][idx:idx + cnt])\n", (23229, 23283), True, 'import numpy as np\n'), ((23370, 23434), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx + cnt])\n", (23377, 23434), True, 'import numpy as np\n'), ((23649, 23678), 'numpy.copy', 'np.copy', (['x_atc[idx:idx + cnt]'], {}), '(x_atc[idx:idx + cnt])\n', (23656, 23678), True, 'import numpy as np\n'), ((23712, 23741), 'numpy.copy', 'np.copy', (['y_atc[idx:idx + cnt]'], {}), '(y_atc[idx:idx + cnt])\n', (23719, 23741), True, 'import numpy as np\n'), ((24439, 24506), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['signal_conf_ph'][idx:idx + cnt, 3]"], {}), "(fileID[gtx]['heights']['signal_conf_ph'][idx:idx + cnt, 3])\n", (24446, 24506), True, 'import numpy as np\n'), ((24540, 24574), 'numpy.count_nonzero', 'np.count_nonzero', (['(ice_sig_conf > 1)'], {}), '(ice_sig_conf > 1)\n', (24556, 24574), True, 'import numpy as np\n'), ((24663, 24693), 'numpy.nonzero', 'np.nonzero', (['(ice_sig_conf == -2)'], {}), '(ice_sig_conf == -2)\n', (24673, 24693), True, 'import numpy as np\n'), ((24843, 24866), 'numpy.max', 'np.max', (['segment_weights'], {}), '(segment_weights)\n', (24849, 24866), True, 'import numpy as np\n'), ((24974, 25029), 'numpy.array', 'np.array', (['(100.0 * segment_weights / snr_norm)'], {'dtype': 'int'}), '(100.0 * 
segment_weights / snr_norm, dtype=int)\n', (24982, 25029), True, 'import numpy as np\n'), ((25074, 25091), 'numpy.copy', 'np.copy', (['snr_norm'], {}), '(snr_norm)\n', (25081, 25091), True, 'import numpy as np\n'), ((25247, 25271), 'numpy.zeros', 'np.zeros', (['cnt'], {'dtype': 'int'}), '(cnt, dtype=int)\n', (25255, 25271), True, 'import numpy as np\n'), ((25634, 25667), 'numpy.count_nonzero', 'np.count_nonzero', (['(pe_sig_conf > 1)'], {}), '(pe_sig_conf > 1)\n', (25650, 25667), True, 'import numpy as np\n'), ((35007, 35067), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['delta_time'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['delta_time'][idx:idx + cnt])\n", (35014, 35067), True, 'import numpy as np\n'), ((35172, 35200), 'numpy.copy', 'np.copy', (['h_ph[idx:idx + cnt]'], {}), '(h_ph[idx:idx + cnt])\n', (35179, 35200), True, 'import numpy as np\n'), ((35614, 35670), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['lat_ph'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['lat_ph'][idx:idx + cnt])\n", (35621, 35670), True, 'import numpy as np\n'), ((35700, 35756), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['lon_ph'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['lon_ph'][idx:idx + cnt])\n", (35707, 35756), True, 'import numpy as np\n'), ((35844, 35907), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['ph_id_channel'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['ph_id_channel'][idx:idx + cnt])\n", (35851, 35907), True, 'import numpy as np\n'), ((35933, 35994), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['ph_id_pulse'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['ph_id_pulse'][idx:idx + cnt])\n", (35940, 35994), True, 'import numpy as np\n'), ((36081, 36145), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx + cnt]"], {}), "(fileID[gtx]['heights']['pce_mframe_cnt'][idx:idx + cnt])\n", (36088, 36145), True, 'import numpy as np\n'), ((36360, 36389), 'numpy.copy', 'np.copy', (['x_atc[idx:idx + cnt]'], 
{}), '(x_atc[idx:idx + cnt])\n', (36367, 36389), True, 'import numpy as np\n'), ((36423, 36452), 'numpy.copy', 'np.copy', (['y_atc[idx:idx + cnt]'], {}), '(y_atc[idx:idx + cnt])\n', (36430, 36452), True, 'import numpy as np\n'), ((37150, 37217), 'numpy.copy', 'np.copy', (["fileID[gtx]['heights']['signal_conf_ph'][idx:idx + cnt, 3]"], {}), "(fileID[gtx]['heights']['signal_conf_ph'][idx:idx + cnt, 3])\n", (37157, 37217), True, 'import numpy as np\n'), ((37251, 37285), 'numpy.count_nonzero', 'np.count_nonzero', (['(ice_sig_conf > 1)'], {}), '(ice_sig_conf > 1)\n', (37267, 37285), True, 'import numpy as np\n'), ((37374, 37404), 'numpy.nonzero', 'np.nonzero', (['(ice_sig_conf == -2)'], {}), '(ice_sig_conf == -2)\n', (37384, 37404), True, 'import numpy as np\n'), ((37554, 37577), 'numpy.max', 'np.max', (['segment_weights'], {}), '(segment_weights)\n', (37560, 37577), True, 'import numpy as np\n'), ((37685, 37709), 'numpy.zeros', 'np.zeros', (['cnt'], {'dtype': 'int'}), '(cnt, dtype=int)\n', (37693, 37709), True, 'import numpy as np\n'), ((37921, 37938), 'numpy.copy', 'np.copy', (['snr_norm'], {}), '(snr_norm)\n', (37928, 37938), True, 'import numpy as np\n'), ((38094, 38118), 'numpy.zeros', 'np.zeros', (['cnt'], {'dtype': 'int'}), '(cnt, dtype=int)\n', (38102, 38118), True, 'import numpy as np\n'), ((38481, 38514), 'numpy.count_nonzero', 'np.count_nonzero', (['(pe_sig_conf > 1)'], {}), '(pe_sig_conf > 1)\n', (38497, 38514), True, 'import numpy as np\n'), ((68778, 68923), 'numpy.sqrt', 'np.sqrt', (['(sigma_geo_h ** 2 + (sigma_geo_along * Segment_dH_along[gtx].data[j]) ** 2 +\n (sigma_geo_across * Distributed_dH_across.data[j]) ** 2)'], {}), '(sigma_geo_h ** 2 + (sigma_geo_along * Segment_dH_along[gtx].data[j]\n ) ** 2 + (sigma_geo_across * Distributed_dH_across.data[j]) ** 2)\n', (68785, 68923), True, 'import numpy as np\n'), ((160824, 160835), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (160832, 160835), True, 'import numpy as np\n'), ((162936, 162947), 
'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (162944, 162947), True, 'import numpy as np\n'), ((4367, 4388), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (4385, 4388), False, 'import os\n'), ((4517, 4538), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (4535, 4538), False, 'import os\n'), ((4712, 4733), 'os.path.expanduser', 'os.path.expanduser', (['p'], {}), '(p)\n', (4730, 4733), False, 'import os\n'), ((47579, 47674), 'numpy.max', 'np.max', (['[Distributed_Height_Error.data[j] ** 2, Distributed_FPB_median_sigma.data[j\n ] ** 2]'], {}), '([Distributed_Height_Error.data[j] ** 2, Distributed_FPB_median_sigma\n .data[j] ** 2])\n', (47585, 47674), True, 'import numpy as np\n'), ((68177, 68274), 'numpy.sqrt', 'np.sqrt', (['(Segment_Land_Ice_Error[gtx].data[j] ** 2 + Segment_Land_Ice_Error[cmp].\n data[j] ** 2)'], {}), '(Segment_Land_Ice_Error[gtx].data[j] ** 2 + Segment_Land_Ice_Error[\n cmp].data[j] ** 2)\n', (68184, 68274), True, 'import numpy as np\n'), ((68307, 68317), 'numpy.abs', 'np.abs', (['dY'], {}), '(dY)\n', (68313, 68317), True, 'import numpy as np\n'), ((23309, 23328), 'numpy.unique', 'np.unique', (['ID_pulse'], {}), '(ID_pulse)\n', (23318, 23328), True, 'import numpy as np\n'), ((27950, 27972), 'numpy.copy', 'np.copy', (["centroid['x']"], {}), "(centroid['x'])\n", (27957, 27972), True, 'import numpy as np\n'), ((28086, 28109), 'numpy.copy', 'np.copy', (['along_X_spread'], {}), '(along_X_spread)\n', (28093, 28109), True, 'import numpy as np\n'), ((28223, 28245), 'numpy.copy', 'np.copy', (["centroid['y']"], {}), "(centroid['y'])\n", (28230, 28245), True, 'import numpy as np\n'), ((29506, 29528), 'numpy.copy', 'np.copy', (["fit['window']"], {}), "(fit['window'])\n", (29513, 29528), True, 'import numpy as np\n'), ((29694, 29713), 'numpy.copy', 'np.copy', (["fit['RDE']"], {}), "(fit['RDE'])\n", (29701, 29713), True, 'import numpy as np\n'), ((30152, 30178), 'numpy.copy', 'np.copy', (["fit['iterations']"], {}), 
"(fit['iterations'])\n", (30159, 30178), True, 'import numpy as np\n'), ((30295, 30309), 'numpy.copy', 'np.copy', (['valid'], {}), '(valid)\n', (30302, 30309), True, 'import numpy as np\n'), ((30422, 30439), 'numpy.copy', 'np.copy', (['n_pulses'], {}), '(n_pulses)\n', (30429, 30439), True, 'import numpy as np\n'), ((31772, 31889), 'numpy.nonzero', 'np.nonzero', (['((height_residuals >= -Distributed_Window.data[j]) & (height_residuals <=\n Distributed_Window.data[j]))'], {}), '((height_residuals >= -Distributed_Window.data[j]) & (\n height_residuals <= Distributed_Window.data[j]))\n', (31782, 31889), True, 'import numpy as np\n'), ((36020, 36039), 'numpy.unique', 'np.unique', (['ID_pulse'], {}), '(ID_pulse)\n', (36029, 36039), True, 'import numpy as np\n'), ((40798, 40820), 'numpy.copy', 'np.copy', (["centroid['x']"], {}), "(centroid['x'])\n", (40805, 40820), True, 'import numpy as np\n'), ((40934, 40957), 'numpy.copy', 'np.copy', (['along_X_spread'], {}), '(along_X_spread)\n', (40941, 40957), True, 'import numpy as np\n'), ((41071, 41093), 'numpy.copy', 'np.copy', (["centroid['y']"], {}), "(centroid['y'])\n", (41078, 41093), True, 'import numpy as np\n'), ((42230, 42252), 'numpy.copy', 'np.copy', (["fit['window']"], {}), "(fit['window'])\n", (42237, 42252), True, 'import numpy as np\n'), ((42418, 42437), 'numpy.copy', 'np.copy', (["fit['RDE']"], {}), "(fit['RDE'])\n", (42425, 42437), True, 'import numpy as np\n'), ((42876, 42902), 'numpy.copy', 'np.copy', (["fit['iterations']"], {}), "(fit['iterations'])\n", (42883, 42902), True, 'import numpy as np\n'), ((43150, 43167), 'numpy.copy', 'np.copy', (['n_pulses'], {}), '(n_pulses)\n', (43157, 43167), True, 'import numpy as np\n'), ((164504, 164515), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (164512, 164515), True, 'import numpy as np\n'), ((164743, 164754), 'numpy.shape', 'np.shape', (['v'], {}), '(v)\n', (164751, 164754), True, 'import numpy as np\n'), ((27257, 27280), 'numpy.abs', 'np.abs', 
(["fit['error'][0]"], {}), "(fit['error'][0])\n", (27263, 27280), True, 'import numpy as np\n'), ((30987, 31018), 'numpy.mean', 'np.mean', (['height_residuals[ifit]'], {}), '(height_residuals[ifit])\n', (30994, 31018), True, 'import numpy as np\n'), ((31051, 31084), 'numpy.median', 'np.median', (['height_residuals[ifit]'], {}), '(height_residuals[ifit])\n', (31060, 31084), True, 'import numpy as np\n'), ((32962, 32983), 'numpy.copy', 'np.copy', (["FPB['count']"], {}), "(FPB['count'])\n", (32969, 32983), True, 'import numpy as np\n'), ((40105, 40128), 'numpy.abs', 'np.abs', (["fit['error'][0]"], {}), "(fit['error'][0])\n", (40111, 40128), True, 'import numpy as np\n'), ((43023, 43037), 'numpy.copy', 'np.copy', (['valid'], {}), '(valid)\n', (43030, 43037), True, 'import numpy as np\n'), ((43715, 43746), 'numpy.mean', 'np.mean', (['height_residuals[ifit]'], {}), '(height_residuals[ifit])\n', (43722, 43746), True, 'import numpy as np\n'), ((43779, 43812), 'numpy.median', 'np.median', (['height_residuals[ifit]'], {}), '(height_residuals[ifit])\n', (43788, 43812), True, 'import numpy as np\n'), ((44533, 44650), 'numpy.nonzero', 'np.nonzero', (['((height_residuals >= -Distributed_Window.data[j]) & (height_residuals <=\n Distributed_Window.data[j]))'], {}), '((height_residuals >= -Distributed_Window.data[j]) & (\n height_residuals <= Distributed_Window.data[j]))\n', (44543, 44650), True, 'import numpy as np\n'), ((45698, 45719), 'numpy.copy', 'np.copy', (["FPB['count']"], {}), "(FPB['count'])\n", (45705, 45719), True, 'import numpy as np\n')] |
import os, gzip, pickle, numpy as np
from variational import mean_field_vso, marginal_approx, get_semfunc
from __config__.filepath import AUX_DIR
def get_scoring_fn(pred_wei, pred_bias, C, meanfield_vecs):
    """
    Build a scoring function for the relpron dataset
    :param pred_wei: weights of semantic functions
    :param pred_bias: biases of semantic functions
    :param C: total cardinality
    :param meanfield_vecs: mean-field vectors for relpron triples
    :return: scoring function
    """
    # One semantic function per predicate
    semfuncs = [get_semfunc(wei, pred_bias[i]) for i, wei in enumerate(pred_wei)]
    # Pre-compute the marginal distribution for every position of every triple
    marginals = []
    for triple in meanfield_vecs:
        marginals.append([marginal_approx(vec, C) for vec in triple])
    def scoring_fn(term, description, **kwargs):
        """
        Calculate how much the triple implies the term
        :param term: noun index
        :param description: (index-of-SBJ-or-OBJ, index-of-triple)
        :return: probability
        """
        position, triple_index = description
        return semfuncs[term](marginals[triple_index][position])
    return scoring_fn
def get_meanfield_fn(pred_wei, pred_bias, link_wei, ent_bias, C, init_vecs):
    """
    Get a function mapping vso triples to meanfield vectors
    :param pred_wei: weights of semantic functions
    :param pred_bias: biases of semantic functions
    :param link_wei: link weight matrix
    :param ent_bias: entity bias
    :param C: total cardinality
    :param init_vecs: zero-context mean-field vectors, by pred index
    :return: function from (verb, agent, patient) triples to mean-field vectors
    """
    # One semantic function per predicate
    semfuncs = [get_semfunc(wei, pred_bias[i]) for i, wei in enumerate(pred_wei)]
    # Fallback for out-of-vocabulary preds: a constant semantic function
    # and an "average" entity vector with total cardinality C
    D = pred_wei.shape[1]
    oov_semfunc = get_semfunc(np.zeros(D), 0)
    oov_vec = np.ones(D) * (C / D)
    def meanfield_fn(triple, **kwargs):
        """
        Calculate meanfield vectors for the triple.
        For OOV items (index None), the semfunc is a constant function.
        :param triple: (verb, agent, patient) pred indices, None for OOV
        :return: mean-field vectors
        """
        funcs = []
        start_vecs = []
        for pred in triple:
            if pred is None:
                funcs.append(oov_semfunc)
                start_vecs.append(oov_vec)
            else:
                funcs.append(semfuncs[pred])
                start_vecs.append(init_vecs[pred])
        return mean_field_vso(funcs, link_wei, ent_bias, C=C, vecs=start_vecs, **kwargs)
    return meanfield_fn
# TODO (above two functions): allow decreasing the bias for hypernyms
# TODO (top function only): combine with normal vectors for relatedness (could also rank separately and combine ranks)
def get_baseline_scoring_fn(pred_wei, pred_bias, C, ent_vecs):
    """
    Get a baseline scoring function for the relpron dataset
    :param pred_wei: weights of semantic functions
    :param pred_bias: biases of semantic functions
    :param C: total cardinality
    :param ent_vecs: zero-context mean-field vectors, by pred index
    :return: scoring function
    """
    # One semantic function per predicate
    semfuncs = [get_semfunc(wei, pred_bias[i]) for i, wei in enumerate(pred_wei)]
    # Position of the head noun within the (verb, agent, patient) triple
    positions = {'SBJ': 1, 'OBJ': 2}
    def score(term, description, **kwargs):
        """
        Calculate how much the triple implies the target
        :param term: noun index
        :param description: (SBJ-or-OBJ, (verb, agent, patient))
        :return: probability
        """
        which, triple = description
        try:
            pos = positions[which]
        except KeyError:
            raise ValueError(which)
        marg = marginal_approx(ent_vecs[triple[pos]], C)
        return semfuncs[term](marg)
    return score
def load_model(name, pred_wei_dir='simplevec_all', link_wei_dir='meanfield_link', meanfield_dir='meanfield_all', basic_length=8, meanfield_length=13, C_index=9):
    """
    Load a model from file
    :param name: filename of full model (without file extension)
    :param pred_wei_dir: directory for pred weights
    :param link_wei_dir: directory for link weights
    :param meanfield_dir: directory for meanfield vectors
    :param basic_length: number of settings for predicate weights
    :param meanfield_length: number of settings for meanfield vectors and biases
    :param C_index: index of setting for cardinality
    :return: pred_wei, pred_bias, link_wei, ent_bias, C, init_vecs
    """
    def load_pkl(subdir, fname):
        # All model components are stored as gzipped pickles under AUX_DIR
        with gzip.open(os.path.join(AUX_DIR, subdir, fname + '.pkl.gz'), 'rb') as f:
            return pickle.load(f)
    # Hyperparameter settings are encoded in the hyphen-separated filename
    parts = name.split('-')
    basic_name = '-'.join(parts[:basic_length])
    meanfield_name = '-'.join(parts[:meanfield_length])
    C = int(parts[C_index])
    pred_wei = load_pkl(pred_wei_dir, basic_name)
    init_vecs = load_pkl(meanfield_dir, meanfield_name)
    pred_bias = load_pkl(meanfield_dir, meanfield_name + '-bias')
    link_wei = load_pkl(link_wei_dir, name)
    ent_bias = load_pkl(link_wei_dir, name + '-bias')
    return pred_wei, pred_bias, link_wei, ent_bias, C, init_vecs
def load_baseline_model(name, pred_wei_dir='simplevec_all', meanfield_dir='meanfield_all', basic_length=8, C_index=9):
    """
    Load a baseline model from file
    :param name: filename of full model (without file extension)
    :param pred_wei_dir: directory for pred weights
    :param meanfield_dir: directory for meanfield vectors and biases
    :param basic_length: number of settings for predicate weights
    :param C_index: index of setting for cardinality
    :return: pred_wei, pred_bias, C, init_vecs
    """
    def load_pkl(subdir, fname):
        # All model components are stored as gzipped pickles under AUX_DIR
        with gzip.open(os.path.join(AUX_DIR, subdir, fname + '.pkl.gz'), 'rb') as f:
            return pickle.load(f)
    # Hyperparameter settings are encoded in the hyphen-separated filename
    parts = name.split('-')
    basic_name = '-'.join(parts[:basic_length])
    C = int(parts[C_index])
    pred_wei = load_pkl(pred_wei_dir, basic_name)
    init_vecs = load_pkl(meanfield_dir, name)
    pred_bias = load_pkl(meanfield_dir, name + '-bias')
    return pred_wei, pred_bias, C, init_vecs
def load_weights_and_vectors(name, pred_wei_dir='simplevec_all', bias_dir='meanfield_all', meanfield_dir='meanfield_relpron', basic_length=8, bias_length=13, C_index=9):
    """
    Load model weights, biases, and meanfield vectors from file
    :param name: filename of full model (without file extension)
    :param pred_wei_dir: directory for pred weights
    :param bias_dir: directory for biases
    :param meanfield_dir: directory for meanfield vectors
    :param basic_length: number of settings for predicate weights
    :param bias_length: number of settings for biases
    :param C_index: index of setting for cardinality
    :return: pred_weights, pred_biases, cardinality, meanfield_vectors
    """
    def load_pkl(subdir, fname):
        # All model components are stored as gzipped pickles under AUX_DIR
        with gzip.open(os.path.join(AUX_DIR, subdir, fname + '.pkl.gz'), 'rb') as f:
            return pickle.load(f)
    # Hyperparameter settings are encoded in the hyphen-separated filename
    parts = name.split('-')
    basic_name = '-'.join(parts[:basic_length])
    bias_name = '-'.join(parts[:bias_length])
    C = int(parts[C_index])
    pred_wei = load_pkl(pred_wei_dir, basic_name)
    pred_bias = load_pkl(bias_dir, bias_name + '-bias')
    vecs = load_pkl(meanfield_dir, name)
    return pred_wei, pred_bias, C, vecs
def load_scoring_fn(*args, **kwargs):
    """
    Load a scoring function from file.
    All arguments are passed to load_weights_and_vectors.
    :return: scoring function
    """
    model_params = load_weights_and_vectors(*args, **kwargs)
    return get_scoring_fn(*model_params)
def load_baseline_scoring_fn(*args, **kwargs):
    """
    Load a baseline scoring function from file.
    All arguments are passed to load_baseline_model.
    :return: scoring function
    """
    model_params = load_baseline_model(*args, **kwargs)
    return get_baseline_scoring_fn(*model_params)
if __name__ == "__main__":
    # Script entry point: compute mean-field vectors for every matching link-weight
    # model over one evaluation dataset, in parallel, and cache them to disk.
    from testing import get_relpron_separated, get_GS2011_indexed, load_freq_lookup_dicts
    LINK_DIR = 'meanfield_link'
    #OUTPUT_DIR = 'meanfield_relpron'
    OUTPUT_DIR = 'meanfield_gs2011'
    # Choose dataset
    #raw_triples, _ = get_relpron_separated()
    #raw_triples, _ = get_relpron_separated(True)
    #raw_triples = [t for _,t in raw_triples]
    raw_triples, _ = get_GS2011_indexed()
    # Convert to indices
    # .get returns None for out-of-vocabulary preds, which get_meanfield_fn handles
    lookup = load_freq_lookup_dicts()
    triples = [(lookup['v'].get(verb),
                lookup['n'].get(agent),
                lookup['n'].get(patient))
               for verb, agent, patient in raw_triples]
    def apply_model(filename, bias_shift):
        "Calculate meanfield vectors for a given model"
        # Encode the bias shift in the output filename ('.' -> '_', '-' -> '~')
        fullname = filename + '-' + str(bias_shift).replace('.','_').replace('-','~')
        # Skip files that have already been processed
        if os.path.exists(os.path.join(AUX_DIR, OUTPUT_DIR, fullname+'.pkl.gz')):
            return
        # Load model
        print('loading', filename, bias_shift)
        # params[3] is ent_bias (see load_model's return tuple) -- lower it by the shift
        params = list(load_model(filename, link_wei_dir=LINK_DIR))
        params[3] -= bias_shift
        meanfield_fn = get_meanfield_fn(*params)
        # Get meanfield vectors
        print('calculating', fullname)
        vecs = [meanfield_fn(t, max_iter=500) for t in triples]
        # Save vectors
        print('saving', fullname)
        with gzip.open(os.path.join(AUX_DIR, OUTPUT_DIR, fullname+'.pkl.gz'), 'wb') as f:
            pickle.dump(vecs, f)
    # Process files
    from multiprocessing import Pool
    from random import shuffle
    from itertools import product
    files = []
    for fullname in os.listdir(os.path.join(AUX_DIR, LINK_DIR)):
        name = fullname.split('.')[0]
        settings = name.split('-')
        # Only consider link weight files
        if settings[-1] in ['raw', 'bias']:
            continue
        # Only load the models we want
        # (hard-coded filter on hyperparameter fields of the hyphen-separated name)
        if all([settings[2] == '1000',
                settings[13] == '0_5',
                settings[14] == '0_2']):
            files.append(name)
    # One job per (model, bias_shift) pair; shuffled so long jobs spread across workers
    files_and_shifts = list(product(files, [0]))
    shuffle(files_and_shifts)
    with Pool(5) as p:
        p.starmap(apply_model, files_and_shifts)
| [
"variational.marginal_approx",
"variational.get_semfunc",
"random.shuffle",
"numpy.ones",
"variational.mean_field_vso",
"pickle.dump",
"testing.get_GS2011_indexed",
"itertools.product",
"pickle.load",
"os.path.join",
"numpy.zeros",
"testing.load_freq_lookup_dicts",
"multiprocessing.Pool"
] | [((8629, 8649), 'testing.get_GS2011_indexed', 'get_GS2011_indexed', ([], {}), '()\n', (8647, 8649), False, 'from testing import get_relpron_separated, get_GS2011_indexed, load_freq_lookup_dicts\n'), ((8697, 8721), 'testing.load_freq_lookup_dicts', 'load_freq_lookup_dicts', ([], {}), '()\n', (8719, 8721), False, 'from testing import get_relpron_separated, get_GS2011_indexed, load_freq_lookup_dicts\n'), ((10409, 10434), 'random.shuffle', 'shuffle', (['files_and_shifts'], {}), '(files_and_shifts)\n', (10416, 10434), False, 'from random import shuffle\n'), ((553, 591), 'variational.get_semfunc', 'get_semfunc', (['pred_wei[i]', 'pred_bias[i]'], {}), '(pred_wei[i], pred_bias[i])\n', (564, 591), False, 'from variational import mean_field_vso, marginal_approx, get_semfunc\n'), ((1625, 1663), 'variational.get_semfunc', 'get_semfunc', (['pred_wei[i]', 'pred_bias[i]'], {}), '(pred_wei[i], pred_bias[i])\n', (1636, 1663), False, 'from variational import mean_field_vso, marginal_approx, get_semfunc\n'), ((1779, 1790), 'numpy.zeros', 'np.zeros', (['D'], {}), '(D)\n', (1787, 1790), True, 'import os, gzip, pickle, numpy as np\n'), ((1808, 1818), 'numpy.ones', 'np.ones', (['D'], {}), '(D)\n', (1815, 1818), True, 'import os, gzip, pickle, numpy as np\n'), ((2364, 2428), 'variational.mean_field_vso', 'mean_field_vso', (['sf', 'link_wei', 'ent_bias'], {'C': 'C', 'vecs': 'vecs'}), '(sf, link_wei, ent_bias, C=C, vecs=vecs, **kwargs)\n', (2378, 2428), False, 'from variational import mean_field_vso, marginal_approx, get_semfunc\n'), ((3057, 3095), 'variational.get_semfunc', 'get_semfunc', (['pred_wei[i]', 'pred_bias[i]'], {}), '(pred_wei[i], pred_bias[i])\n', (3068, 3095), False, 'from variational import mean_field_vso, marginal_approx, get_semfunc\n'), ((3576, 3615), 'variational.marginal_approx', 'marginal_approx', (['ent_vecs[triple[i]]', 'C'], {}), '(ent_vecs[triple[i]], C)\n', (3591, 3615), False, 'from variational import mean_field_vso, marginal_approx, get_semfunc\n'), ((4653, 
4667), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4664, 4667), False, 'import os, gzip, pickle, numpy as np\n'), ((4783, 4797), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4794, 4797), False, 'import os, gzip, pickle, numpy as np\n'), ((4918, 4932), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (4929, 4932), False, 'import os, gzip, pickle, numpy as np\n'), ((5036, 5050), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5047, 5050), False, 'import os, gzip, pickle, numpy as np\n'), ((5159, 5173), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (5170, 5173), False, 'import os, gzip, pickle, numpy as np\n'), ((6124, 6138), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6135, 6138), False, 'import os, gzip, pickle, numpy as np\n'), ((6244, 6258), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6255, 6258), False, 'import os, gzip, pickle, numpy as np\n'), ((6369, 6383), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (6380, 6383), False, 'import os, gzip, pickle, numpy as np\n'), ((7390, 7404), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7401, 7404), False, 'import os, gzip, pickle, numpy as np\n'), ((7515, 7529), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7526, 7529), False, 'import os, gzip, pickle, numpy as np\n'), ((7630, 7644), 'pickle.load', 'pickle.load', (['f'], {}), '(f)\n', (7641, 7644), False, 'import os, gzip, pickle, numpy as np\n'), ((9953, 9984), 'os.path.join', 'os.path.join', (['AUX_DIR', 'LINK_DIR'], {}), '(AUX_DIR, LINK_DIR)\n', (9965, 9984), False, 'import os, gzip, pickle, numpy as np\n'), ((10384, 10403), 'itertools.product', 'product', (['files', '[0]'], {}), '(files, [0])\n', (10391, 10403), False, 'from itertools import product\n'), ((10449, 10456), 'multiprocessing.Pool', 'Pool', (['(5)'], {}), '(5)\n', (10453, 10456), False, 'from multiprocessing import Pool\n'), ((669, 693), 'variational.marginal_approx', 'marginal_approx', (['prob', 'C'], {}), '(prob, C)\n', (684, 693), 
False, 'from variational import mean_field_vso, marginal_approx, get_semfunc\n'), ((4563, 4622), 'os.path.join', 'os.path.join', (['AUX_DIR', 'pred_wei_dir', "(basic_name + '.pkl.gz')"], {}), "(AUX_DIR, pred_wei_dir, basic_name + '.pkl.gz')\n", (4575, 4622), False, 'import os, gzip, pickle, numpy as np\n'), ((4687, 4751), 'os.path.join', 'os.path.join', (['AUX_DIR', 'meanfield_dir', "(meanfield_name + '.pkl.gz')"], {}), "(AUX_DIR, meanfield_dir, meanfield_name + '.pkl.gz')\n", (4699, 4751), False, 'import os, gzip, pickle, numpy as np\n'), ((4817, 4886), 'os.path.join', 'os.path.join', (['AUX_DIR', 'meanfield_dir', "(meanfield_name + '-bias.pkl.gz')"], {}), "(AUX_DIR, meanfield_dir, meanfield_name + '-bias.pkl.gz')\n", (4829, 4886), False, 'import os, gzip, pickle, numpy as np\n'), ((4952, 5005), 'os.path.join', 'os.path.join', (['AUX_DIR', 'link_wei_dir', "(name + '.pkl.gz')"], {}), "(AUX_DIR, link_wei_dir, name + '.pkl.gz')\n", (4964, 5005), False, 'import os, gzip, pickle, numpy as np\n'), ((5070, 5128), 'os.path.join', 'os.path.join', (['AUX_DIR', 'link_wei_dir', "(name + '-bias.pkl.gz')"], {}), "(AUX_DIR, link_wei_dir, name + '-bias.pkl.gz')\n", (5082, 5128), False, 'import os, gzip, pickle, numpy as np\n'), ((6034, 6093), 'os.path.join', 'os.path.join', (['AUX_DIR', 'pred_wei_dir', "(basic_name + '.pkl.gz')"], {}), "(AUX_DIR, pred_wei_dir, basic_name + '.pkl.gz')\n", (6046, 6093), False, 'import os, gzip, pickle, numpy as np\n'), ((6158, 6212), 'os.path.join', 'os.path.join', (['AUX_DIR', 'meanfield_dir', "(name + '.pkl.gz')"], {}), "(AUX_DIR, meanfield_dir, name + '.pkl.gz')\n", (6170, 6212), False, 'import os, gzip, pickle, numpy as np\n'), ((6278, 6337), 'os.path.join', 'os.path.join', (['AUX_DIR', 'meanfield_dir', "(name + '-bias.pkl.gz')"], {}), "(AUX_DIR, meanfield_dir, name + '-bias.pkl.gz')\n", (6290, 6337), False, 'import os, gzip, pickle, numpy as np\n'), ((7300, 7359), 'os.path.join', 'os.path.join', (['AUX_DIR', 'pred_wei_dir', "(basic_name + 
'.pkl.gz')"], {}), "(AUX_DIR, pred_wei_dir, basic_name + '.pkl.gz')\n", (7312, 7359), False, 'import os, gzip, pickle, numpy as np\n'), ((7424, 7483), 'os.path.join', 'os.path.join', (['AUX_DIR', 'bias_dir', "(bias_name + '-bias.pkl.gz')"], {}), "(AUX_DIR, bias_dir, bias_name + '-bias.pkl.gz')\n", (7436, 7483), False, 'import os, gzip, pickle, numpy as np\n'), ((7549, 7603), 'os.path.join', 'os.path.join', (['AUX_DIR', 'meanfield_dir', "(name + '.pkl.gz')"], {}), "(AUX_DIR, meanfield_dir, name + '.pkl.gz')\n", (7561, 7603), False, 'import os, gzip, pickle, numpy as np\n'), ((9169, 9224), 'os.path.join', 'os.path.join', (['AUX_DIR', 'OUTPUT_DIR', "(fullname + '.pkl.gz')"], {}), "(AUX_DIR, OUTPUT_DIR, fullname + '.pkl.gz')\n", (9181, 9224), False, 'import os, gzip, pickle, numpy as np\n'), ((9754, 9774), 'pickle.dump', 'pickle.dump', (['vecs', 'f'], {}), '(vecs, f)\n', (9765, 9774), False, 'import os, gzip, pickle, numpy as np\n'), ((9675, 9730), 'os.path.join', 'os.path.join', (['AUX_DIR', 'OUTPUT_DIR', "(fullname + '.pkl.gz')"], {}), "(AUX_DIR, OUTPUT_DIR, fullname + '.pkl.gz')\n", (9687, 9730), False, 'import os, gzip, pickle, numpy as np\n')] |
from __future__ import print_function
import os
import argparse
import torch
import torch.backends.cudnn as cudnn
import numpy as np
from prior_box import PriorBox
import cv2
from models.retinaface import RetinaFace
from utils.box_utils import *
import time
import sys
# Pin inference to GPU index 1 -- presumably to avoid the default device; confirm for deployment
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
# Make the script's own directory importable regardless of the working directory
sys.path.append(os.path.realpath(__file__).replace(__file__, ''))
# Command-line options for RetinaFace detection
parser = argparse.ArgumentParser(description='Retinaface')
parser.add_argument('-m', '--trained_model', default = 'weights/mobilenet_Final.pth',
                    type=str, help='Trained state_dict file path to open')
parser.add_argument('--network', default='mobile0.25', help='Backbone network mobile0.25 or resnet50')
parser.add_argument('--cpu', action="store_true", default=False, help='Use cpu inference')
parser.add_argument('--confidence_threshold', default=0.7, type=float, help='confidence_threshold')
parser.add_argument('--top_k', default=5000, type=int, help='top_k')
parser.add_argument('--nms_threshold', default=0.4, type=float, help='nms_threshold')
parser.add_argument('--keep_top_k', default=750, type=int, help='keep_top_k')
parser.add_argument('-s', '--save_image', action="store_true", default=True, help='show detection results')
args = parser.parse_args()
def check_keys(model, pretrained_state_dict):
    """Report the overlap between checkpoint keys and the model's state_dict keys.

    Asserts that at least one key is shared; returns True otherwise.
    """
    checkpoint_keys = set(pretrained_state_dict.keys())
    network_keys = set(model.state_dict().keys())
    shared_keys = network_keys & checkpoint_keys
    extra_keys = checkpoint_keys - network_keys
    absent_keys = network_keys - checkpoint_keys
    print('Missing keys:{}'.format(len(absent_keys)))
    print('Unused checkpoint keys:{}'.format(len(extra_keys)))
    print('Used keys:{}'.format(len(shared_keys)))
    assert len(shared_keys) > 0, 'load NONE from pretrained checkpoint'
    return True
def remove_prefix(state_dict, prefix):
    ''' Old style model is stored with all names of parameters sharing common prefix 'module.' '''
    print('remove prefix \'{}\''.format(prefix))
    def strip(key):
        # Drop everything up to and including the first occurrence of the prefix
        if key.startswith(prefix):
            return key.split(prefix, 1)[-1]
        return key
    stripped = {}
    for key, value in state_dict.items():
        stripped[strip(key)] = value
    return stripped
def load_model(model, pretrained_path, load_to_cpu):
print('Loading pretrained model from {}'.format(pretrained_path))
if load_to_cpu:
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage)
else:
device = torch.cuda.current_device()
pretrained_dict = torch.load(pretrained_path, map_location=lambda storage, loc: storage.cuda(device))
if "state_dict" in pretrained_dict.keys():
pretrained_dict = remove_prefix(pretrained_dict['state_dict'], 'module.')
else:
pretrained_dict = remove_prefix(pretrained_dict, 'module.')
check_keys(model, pretrained_dict)
model.load_state_dict(pretrained_dict, strict=False)
return model
def single_class_non_max_suppression(bboxes, confidences, conf_thresh=0.6, iou_thresh=0.5, keep_top_k=-1):
'''
do nms on single class.
Hint: for the specific class, given the bbox and its confidence,
1) sort the bbox according to the confidence from top to down, we call this a set
2) select the bbox with the highest confidence, remove it from set, and do IOU calculate with the rest bbox
3) remove the bbox whose IOU is higher than the iou_thresh from the set,
4) loop step 2 and 3, util the set is empty.
:param bboxes: numpy array of 2D, [num_bboxes, 4]
:param confidences: numpy array of 1D. [num_bboxes]
:param conf_thresh:
:param iou_thresh:
:param keep_top_k:
:return:
'''
if len(bboxes) == 0:
return []
conf_keep_idx = np.where(confidences > conf_thresh)[0]
bboxes = bboxes[conf_keep_idx]
confidences = confidences[conf_keep_idx]
pick = []
xmin = bboxes[:, 0]
ymin = bboxes[:, 1]
xmax = bboxes[:, 2]
ymax = bboxes[:, 3]
area = (xmax - xmin + 1e-3) * (ymax - ymin + 1e-3)
idxs = np.argsort(confidences)
while len(idxs) > 0:
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# keep top k
if keep_top_k != -1:
if len(pick) >= keep_top_k:
break
overlap_xmin = np.maximum(xmin[i], xmin[idxs[:last]])
overlap_ymin = np.maximum(ymin[i], ymin[idxs[:last]])
overlap_xmax = np.minimum(xmax[i], xmax[idxs[:last]])
overlap_ymax = np.minimum(ymax[i], ymax[idxs[:last]])
overlap_w = np.maximum(0, overlap_xmax - overlap_xmin)
overlap_h = np.maximum(0, overlap_ymax - overlap_ymin)
overlap_area = overlap_w * overlap_h
overlap_ratio = overlap_area / (area[idxs[:last]] + area[i] - overlap_area)
need_to_be_deleted_idx = np.concatenate(([last], np.where(overlap_ratio > iou_thresh)[0]))
idxs = np.delete(idxs, need_to_be_deleted_idx)
return conf_keep_idx[pick]
if __name__ == '__main__':
torch.set_grad_enabled(False)
cfg = None
cfg = {
'name': 'mobilenet',
# 'name': 'resnet18',
'min_sizes': [[16, 32], [64, 128], [256, 512]],
'steps': [8, 16, 32],
'variance': [0.1, 0.2],
'clip': False,
'loc_weight': 2.0,
'gpu_train': True,
'batch_size': 1,
'ngpu': 1,
'image_size': 640,
'pretrain': True,
'return_layers': {'stage1': 1, 'stage2': 2, 'stage3': 3},
'in_channel': 32,
'out_channel': 64
}
root_path = os.path.realpath(__file__).replace("test.py", "")
model_path = root_path + args.trained_model
# net and model
net = RetinaFace(cfg=cfg, phase = 'test')
net = load_model(net, model_path, args.cpu)
net.eval()
print('Finished loading model!')
print(net)
cudnn.benchmark = True
device = torch.device("cpu" if args.cpu else "cuda")
net = net.to(device)
resize = 1
ims = os.listdir('/home/videos/051209')
# im_path = [os.path.join('/home/videos/051209', im) for im in ims]
im_path = ['/root/face_mask_lmks_detection/test.jpg']
# testing begin
for image_path in im_path:
#image_path = "/root/face_mask_lmks_detection/test.jpg"
img_raw = cv2.imread(image_path, cv2.IMREAD_COLOR)
img = np.float32(img_raw)
im_height, im_width, _ = img.shape
scale = torch.Tensor([img.shape[1], img.shape[0], img.shape[1], img.shape[0]]) # w h w h
img -= (104, 117, 123)
img = img.transpose(2, 0, 1)
img = torch.from_numpy(img).unsqueeze(0)
img = img.to(device)
scale = scale.to(device)
tic = time.time()
loc, conf, landms = net(img) # forward pass
print('net forward time: {:.4f}'.format(time.time() - tic))
priorbox = PriorBox(cfg, image_size=(im_height, im_width))
priors = priorbox.forward()
priors = priors.to(device)
prior_data = priors.data
boxes = decode(loc.data.squeeze(0), prior_data, cfg['variance'])
boxes = boxes * scale / resize
boxes = boxes.cpu().numpy()
# remove batch dim, as test only for single img
scores = conf.squeeze(0).data.cpu().numpy()[:, 1:] # conf : batch, num anchors, 3
landms = decode_landm(landms.data.squeeze(0), prior_data, cfg['variance'])
scale1 = torch.Tensor([img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2], img.shape[3], img.shape[2],
img.shape[3], img.shape[2]])
scale1 = scale1.to(device)
landms = landms * scale1 / resize
landms = landms.cpu().numpy()
# ignore low scores
# inds = np.where(scores > args.confidence_threshold)[0]
# boxes = boxes[inds]
# landms = landms[inds]
# scores = scores[inds] # 1, num_anchors, 2
# # keep top-K before NMS, cos there are 2 different label, split them then concat
# tem_scores = []
# tem_boexes = []
# tem_landms = []
# for i in range(scores.shape[-1]):
# per_cls_scores = scores[..., i]
# per_cls_boxes = boxes[..., i]
# pre_cls_landms = landms[..., i]
# # keep top-K before NMS
# order = per_cls_scores.argsort()[::-1][:args.top_k]
# per_cls_boxes = per_cls_boxes[order]
# pre_cls_landms = pre_cls_landms[order]
# per_cls_scores = per_cls_scores[order]
# tem_scores.append(per_cls_scores)
# tem_boexes.append(per_cls_boxes)
# tem_landms.append(pre_cls_landms)
# conbine per_cls to a big array
# scores = np.concatnate(tem_scores, 0)
# boxes = np.concatnate(tem_boexes, 0)
# landms = np.concatnate(tem_landms, 0)
# we need to max scores for each anchor
labels = np.argmax(scores, axis=-1)
scores = np.max(scores, axis=-1) # scores : number anchors,
if len(scores)==0:
continue
keep_idx = single_class_non_max_suppression(boxes, scores, 0.6, 0.5)
for idx in keep_idx:
conf = float(scores[idx])
class_id = labels[idx]
bbox = boxes[idx]
landm = landms[idx]
text = "{:.4f}".format(conf)
# clip the coordinate, avoid the value exceed the image boundary.
xmin = max(0, int(bbox[0]))
ymin = max(0, int(bbox[1]))
xmax = min(int(bbox[2]), im_width)
ymax = min(int(bbox[3]), im_height)
if int(class_id) == 1:
color = (0,255,0)
else:
color = (0, 0, 255)
cv2.rectangle(img_raw, (xmin, ymin), (xmax, ymax), color, 2)
cv2.putText(img_raw, text, (xmin, ymin+12),
cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# landms
cv2.circle(img_raw, (landm[0], landm[1]), 1, (0, 0, 255), 2)
cv2.circle(img_raw, (landm[2], landm[3]), 1, (0, 255, 255), 2)
cv2.circle(img_raw, (landm[4], landm[5]), 1, (255, 0, 255), 2)
cv2.circle(img_raw, (landm[6], landm[7]), 1, (0, 255, 0), 2)
cv2.circle(img_raw, (landm[8], landm[9]), 1, (255, 0, 0), 2)
# save image
# name = "test.jpg"
cv2.imwrite('./result.jpg', img_raw)
# print(scores)
# # do multi cls NMS
# dets = np.hstack((boxes, scores[:, np.newaxis], labels[:, np.newaxis])).astype(np.float32, copy=False)
# face_idx = np.where(labels==0)
# face_dets = dets[face_idx]
# face_landms = landms[face_idx]
# mask_idx = np.where(labels==1)
# mask_dets = dets[mask_idx]
# mask_landms = landms[mask_idx]
# face_keep = py_cpu_nms(face_dets, args.nms_threshold)
# mask_keep = py_cpu_nms(mask_dets, args.nms_threshold)
# face_dets = face_dets[face_keep,:]
# face_landms = face_landms[face_keep,:]
# mask_dets = mask_dets[mask_keep,:]
# mask_landms = mask_landms[mask_keep,:]
# # dets = dets[keep, :]
# # landms = landms[keep]
# # keep top-K faster NMS
# # dets = dets[:args.keep_top_k, :]
# # landms = landms[:args.keep_top_k, :]
# dets = np.concatenate((face_dets, mask_dets), axis=0)
# landms = np.concatenate((face_landms, mask_landms), axis=0)
# dets = np.concatenate((dets, landms), axis=1)
# show image
# if args.save_image:
# for b in dets:
# text = "{:.4f}".format(b[4])
# b = list(map(int, b))
# if int(b[5]) == 1:
# color = (0,255,0)
# else:
# color = (0, 0, 255)
# cv2.rectangle(img_raw, (b[0], b[1]), (b[2], b[3]), color, 2)
# cx = b[0]
# cy = b[1] + 12
# cv2.putText(img_raw, text, (cx, cy),
# cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 255, 255))
# # landms
# cv2.circle(img_raw, (b[6], b[7]), 1, (0, 0, 255), 2)
# cv2.circle(img_raw, (b[8], b[9]), 1, (0, 255, 255), 2)
# cv2.circle(img_raw, (b[10], b[11]), 1, (255, 0, 255), 2)
# cv2.circle(img_raw, (b[12], b[13]), 1, (0, 255, 0), 2)
# cv2.circle(img_raw, (b[14], b[15]), 1, (255, 0, 0), 2)
# # save image
# # name = "test.jpg"
# cv2.imwrite('/home/dd/'+image_path.split('/')[-1].replace('.jpg', '_2.jpg'), img_raw)
# cv2.imwrite(image_path.split('/')[-1], img_raw)
| [
"cv2.rectangle",
"prior_box.PriorBox",
"torch.from_numpy",
"numpy.argsort",
"models.retinaface.RetinaFace",
"os.listdir",
"argparse.ArgumentParser",
"numpy.where",
"numpy.delete",
"numpy.max",
"numpy.maximum",
"torch.cuda.current_device",
"torch.Tensor",
"numpy.argmax",
"cv2.putText",
... | [((387, 436), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Retinaface"""'}), "(description='Retinaface')\n", (410, 436), False, 'import argparse\n'), ((3994, 4017), 'numpy.argsort', 'np.argsort', (['confidences'], {}), '(confidences)\n', (4004, 4017), True, 'import numpy as np\n'), ((4956, 4985), 'torch.set_grad_enabled', 'torch.set_grad_enabled', (['(False)'], {}), '(False)\n', (4978, 4985), False, 'import torch\n'), ((5576, 5609), 'models.retinaface.RetinaFace', 'RetinaFace', ([], {'cfg': 'cfg', 'phase': '"""test"""'}), "(cfg=cfg, phase='test')\n", (5586, 5609), False, 'from models.retinaface import RetinaFace\n'), ((5767, 5810), 'torch.device', 'torch.device', (["('cpu' if args.cpu else 'cuda')"], {}), "('cpu' if args.cpu else 'cuda')\n", (5779, 5810), False, 'import torch\n'), ((5863, 5896), 'os.listdir', 'os.listdir', (['"""/home/videos/051209"""'], {}), "('/home/videos/051209')\n", (5873, 5896), False, 'import os\n'), ((2335, 2405), 'torch.load', 'torch.load', (['pretrained_path'], {'map_location': '(lambda storage, loc: storage)'}), '(pretrained_path, map_location=lambda storage, loc: storage)\n', (2345, 2405), False, 'import torch\n'), ((2433, 2460), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (2458, 2460), False, 'import torch\n'), ((3696, 3731), 'numpy.where', 'np.where', (['(confidences > conf_thresh)'], {}), '(confidences > conf_thresh)\n', (3704, 3731), True, 'import numpy as np\n'), ((4256, 4294), 'numpy.maximum', 'np.maximum', (['xmin[i]', 'xmin[idxs[:last]]'], {}), '(xmin[i], xmin[idxs[:last]])\n', (4266, 4294), True, 'import numpy as np\n'), ((4318, 4356), 'numpy.maximum', 'np.maximum', (['ymin[i]', 'ymin[idxs[:last]]'], {}), '(ymin[i], ymin[idxs[:last]])\n', (4328, 4356), True, 'import numpy as np\n'), ((4380, 4418), 'numpy.minimum', 'np.minimum', (['xmax[i]', 'xmax[idxs[:last]]'], {}), '(xmax[i], xmax[idxs[:last]])\n', (4390, 4418), True, 'import numpy as np\n'), ((4442, 
4480), 'numpy.minimum', 'np.minimum', (['ymax[i]', 'ymax[idxs[:last]]'], {}), '(ymax[i], ymax[idxs[:last]])\n', (4452, 4480), True, 'import numpy as np\n'), ((4501, 4543), 'numpy.maximum', 'np.maximum', (['(0)', '(overlap_xmax - overlap_xmin)'], {}), '(0, overlap_xmax - overlap_xmin)\n', (4511, 4543), True, 'import numpy as np\n'), ((4564, 4606), 'numpy.maximum', 'np.maximum', (['(0)', '(overlap_ymax - overlap_ymin)'], {}), '(0, overlap_ymax - overlap_ymin)\n', (4574, 4606), True, 'import numpy as np\n'), ((4851, 4890), 'numpy.delete', 'np.delete', (['idxs', 'need_to_be_deleted_idx'], {}), '(idxs, need_to_be_deleted_idx)\n', (4860, 4890), True, 'import numpy as np\n'), ((6166, 6206), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_COLOR'], {}), '(image_path, cv2.IMREAD_COLOR)\n', (6176, 6206), False, 'import cv2\n'), ((6222, 6241), 'numpy.float32', 'np.float32', (['img_raw'], {}), '(img_raw)\n', (6232, 6241), True, 'import numpy as np\n'), ((6302, 6372), 'torch.Tensor', 'torch.Tensor', (['[img.shape[1], img.shape[0], img.shape[1], img.shape[0]]'], {}), '([img.shape[1], img.shape[0], img.shape[1], img.shape[0]])\n', (6314, 6372), False, 'import torch\n'), ((6577, 6588), 'time.time', 'time.time', ([], {}), '()\n', (6586, 6588), False, 'import time\n'), ((6730, 6777), 'prior_box.PriorBox', 'PriorBox', (['cfg'], {'image_size': '(im_height, im_width)'}), '(cfg, image_size=(im_height, im_width))\n', (6738, 6777), False, 'from prior_box import PriorBox\n'), ((7296, 7460), 'torch.Tensor', 'torch.Tensor', (['[img.shape[3], img.shape[2], img.shape[3], img.shape[2], img.shape[3], img.\n shape[2], img.shape[3], img.shape[2], img.shape[3], img.shape[2]]'], {}), '([img.shape[3], img.shape[2], img.shape[3], img.shape[2], img.\n shape[3], img.shape[2], img.shape[3], img.shape[2], img.shape[3], img.\n shape[2]])\n', (7308, 7460), False, 'import torch\n'), ((8853, 8879), 'numpy.argmax', 'np.argmax', (['scores'], {'axis': '(-1)'}), '(scores, axis=-1)\n', (8862, 8879), True, 
'import numpy as np\n'), ((8897, 8920), 'numpy.max', 'np.max', (['scores'], {'axis': '(-1)'}), '(scores, axis=-1)\n', (8903, 8920), True, 'import numpy as np\n'), ((10351, 10387), 'cv2.imwrite', 'cv2.imwrite', (['"""./result.jpg"""', 'img_raw'], {}), "('./result.jpg', img_raw)\n", (10362, 10387), False, 'import cv2\n'), ((327, 353), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (343, 353), False, 'import os\n'), ((5447, 5473), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (5463, 5473), False, 'import os\n'), ((9697, 9757), 'cv2.rectangle', 'cv2.rectangle', (['img_raw', '(xmin, ymin)', '(xmax, ymax)', 'color', '(2)'], {}), '(img_raw, (xmin, ymin), (xmax, ymax), color, 2)\n', (9710, 9757), False, 'import cv2\n'), ((9783, 9879), 'cv2.putText', 'cv2.putText', (['img_raw', 'text', '(xmin, ymin + 12)', 'cv2.FONT_HERSHEY_DUPLEX', '(0.5)', '(255, 255, 255)'], {}), '(img_raw, text, (xmin, ymin + 12), cv2.FONT_HERSHEY_DUPLEX, 0.5,\n (255, 255, 255))\n', (9794, 9879), False, 'import cv2\n'), ((9932, 9992), 'cv2.circle', 'cv2.circle', (['img_raw', '(landm[0], landm[1])', '(1)', '(0, 0, 255)', '(2)'], {}), '(img_raw, (landm[0], landm[1]), 1, (0, 0, 255), 2)\n', (9942, 9992), False, 'import cv2\n'), ((10005, 10067), 'cv2.circle', 'cv2.circle', (['img_raw', '(landm[2], landm[3])', '(1)', '(0, 255, 255)', '(2)'], {}), '(img_raw, (landm[2], landm[3]), 1, (0, 255, 255), 2)\n', (10015, 10067), False, 'import cv2\n'), ((10080, 10142), 'cv2.circle', 'cv2.circle', (['img_raw', '(landm[4], landm[5])', '(1)', '(255, 0, 255)', '(2)'], {}), '(img_raw, (landm[4], landm[5]), 1, (255, 0, 255), 2)\n', (10090, 10142), False, 'import cv2\n'), ((10155, 10215), 'cv2.circle', 'cv2.circle', (['img_raw', '(landm[6], landm[7])', '(1)', '(0, 255, 0)', '(2)'], {}), '(img_raw, (landm[6], landm[7]), 1, (0, 255, 0), 2)\n', (10165, 10215), False, 'import cv2\n'), ((10228, 10288), 'cv2.circle', 'cv2.circle', (['img_raw', '(landm[8], landm[9])', 
'(1)', '(255, 0, 0)', '(2)'], {}), '(img_raw, (landm[8], landm[9]), 1, (255, 0, 0), 2)\n', (10238, 10288), False, 'import cv2\n'), ((6465, 6486), 'torch.from_numpy', 'torch.from_numpy', (['img'], {}), '(img)\n', (6481, 6486), False, 'import torch\n'), ((4794, 4830), 'numpy.where', 'np.where', (['(overlap_ratio > iou_thresh)'], {}), '(overlap_ratio > iou_thresh)\n', (4802, 4830), True, 'import numpy as np\n'), ((6690, 6701), 'time.time', 'time.time', ([], {}), '()\n', (6699, 6701), False, 'import time\n')] |
from __future__ import print_function
import sys
import torch
import torch.optim as optim
import numpy as np
from torchvision import datasets, transforms
from absl import app
from absl import flags
import copy
import torch.nn.functional as F
from load_data import MNIST_data, Covertype_data
from model import ConvNet, Linear, Logistic, LinearMnist, ConvNetTest
from rdp_accountant import _compute_rdp, _compute_delta
from get_steps import get_sigma
FLAGS=flags.FLAGS
flags.DEFINE_string('dataset', 'mnist', 'which dataset to run, [mnist, covertype]')
flags.DEFINE_integer('max_steps', 15000, 'maximum training steps')
flags.DEFINE_integer('experiment_id', 0, 'used to distinguish different instance of experiment')
flags.DEFINE_float('q', 0.01, 'samping ratio q')
flags.DEFINE_float('lr', 0.1, 'learning rate')
flags.DEFINE_float('momentum', .6, '0 for not using (0,1):momentum 2:Adam')
flags.DEFINE_float('clip_bound', 12, 'the clipping bound of individual gradient')
flags.DEFINE_bool('convex', True, 'convex objective or not')
flags.DEFINE_float('sigma', -1, 'deprecated')
flags.DEFINE_float('init_sigma', -1, 'initial sigma')
flags.DEFINE_float('epsilon', 2, 'desired epsilon')
flags.DEFINE_float('delta', -1, 'desired delta in (0,1) , other values mean no privately training. IF -1, will be set as 1/n**2')
flags.DEFINE_bool('cuda', True, 'use gpu or not')
flags.DEFINE_float('SGN', 2, 'using varying norm or not, 0 for not using, 1 for both decrease clip bound and sigma, 2 for decrease clip bound and fix sigma')
flags.DEFINE_integer('auto_sigma', 0, '0 for fixed sigma to run target epoched, 1 for fixed bigger sigma')
flags.DEFINE_integer('epoches', 20, 'epoches to run')
def clip_grads(clip_bound, model): #clipping individual gradient with FLAGS.clip_bound
para_norm=0.
for para in model.parameters():
para_norm+=torch.sum(torch.mul(para.grad,para.grad))
para_norm=torch.sqrt(para_norm)
if(para_norm > clip_bound):
for para in model.parameters():
para.grad=torch.div(para.grad, para_norm/clip_bound)
return [param.grad for param in model.parameters()]
def add_noise(sigma, grad_list): #adding noise, notice sigma=FLAGS.sigma*FLAGS.clip_bound
cuda = FLAGS.cuda and torch.cuda.is_available()
device = torch.device("cuda" if cuda and not FLAGS.convex else "cpu")
for i, grad in enumerate(grad_list):
mean=torch.zeros_like(grad).to(device)
stddev=torch.add(torch.zeros_like(grad), sigma).to(device) #mean is used as zeros tensor
grad_list[i]=torch.add(grad, torch.normal(mean,stddev))
return grad_list
def logging(info, mode='a'):
try:
os.mkdir('logs')
except:
pass
f=open('logs/log%d.txt'%FLAGS.experiment_id, mode)
f.write(info)
f.close()
def sampler(tuple): #samping each index with samping probability q
n=tuple[1].shape[0]
rand=np.random.rand(n)
index=(rand<=FLAGS.q)
index=np.arange(0, n)[index]
return tuple[0][index], tuple[1][index]
def get_norm(grads, num=1): #get L2 norm of given list
norm=0.
for grad in grads:
grad=grad/num
norm+=torch.sum(torch.mul(grad,grad))
return np.sqrt(norm)
def vary_noise(diff):
steps=FLAGS.epoches/FLAGS.q
FLAGS.sigma=min(FLAGS.init_sigma+diff, FLAGS.sigma+diff/steps)
def vary_bound(t):
if(FLAGS.dataset=='mnist'):
steps= 500
else:
steps= 500
ratio=min(t/steps, 1)
return 1+ratio
def check_norm(t, model, norm_list, train_loss):
conv_list=[]
fc_list=[]
for (name, _),para in zip(model.named_parameters(), model.parameters()):
if para.requires_grad:
if('conv' in name):
conv_list.append(para.grad)
elif('fc' in name):
fc_list.append(para.grad)
#print(para.data)
#print(len(conv_list))
norm_list.append([get_norm(conv_list), get_norm(fc_list), get_norm(conv_list+fc_list), train_loss])
#print('at %d norm of conv is : '%t, get_norm(conv_list))
#print('at %d norm of fc is : '%t , get_norm(fc_list))
def train(model, device, train_tuple, optimizer, diff, total_privacy_l, t, norm_list):
model.train()
data, target=sampler(train_tuple)
data, target = data.to(device), target.to(device)
if(FLAGS.delta>0 and FLAGS.delta<1): # we are training model privately
train_loss=0.
sigma=FLAGS.sigma
clip_bound=FLAGS.clip_bound
if(FLAGS.SGN==1):
clip_bound=FLAGS.clip_bound/vary_bound(t)
elif(FLAGS.SGN==2):
v=vary_bound(t)
clip_bound=FLAGS.clip_bound/v
sigma=sigma*v
for i in range(data.shape[0]): #Here we compute individual gradients sequentially. However parallel computing is achieveable.
optimizer.zero_grad() #See Ian Goodfellow's post in https://github.com/tensorflow/tensorflow/issues/4897
if(FLAGS.dataset=='mnist' and not FLAGS.convex):
output = model(data[i].reshape([1, 1, 28, 28]))
elif(FLAGS.dataset=='mnist'):
output = model(data[i].reshape([1, 784]))
else:
output = model(data[i].reshape([1,54]))
loss = F.nll_loss(output, target[i].reshape([1]))
train_loss+=loss/data.shape[0]
loss.backward()
if(i==0):
accmulated_grad=clip_grads(clip_bound, model) #clip grads for each set of parameter and return them
else:
#if(t%50==0):
# print(get_norm(clip_grads(FLAGS.clip_bound, model), 1))
accmulated_grad=[x+y for x,y in zip(accmulated_grad, clip_grads(clip_bound, model))]
#print(clip_bound, ' s: ', sigma)
#current_norm=get_norm(accmulated_grad, data.shape[0])
#print('norm before noise: ', current_norm)
if(FLAGS.delta==-1):
accmulated_grad=add_noise(2*clip_bound*sigma, accmulated_grad)#due to the different definition of differential privacy
else:
accmulated_grad=add_noise(clip_bound*sigma, accmulated_grad)#add noise so we can privately release gradient
#print(data.shape[0])
#print(get_norm(accmulated_grad, data.shape[0]))
#We accumulate the rdp of each step. rdp at order t+1 is equivalent to alpha at order t.
#See <NAME>, https://arxiv.org/pdf/1702.07476.pdf The code is from
#https://github.com/tensorflow/models/tree/master/research/differential_privacy/privacy_accountant/python
curr_privacy_l=[_compute_rdp(FLAGS.q, sigma, order) for order in range(2, 2+128)]
total_privacy_l=[x+y for x, y in zip(curr_privacy_l, total_privacy_l)]
for i, (param,grad) in enumerate(zip(model.parameters(), accmulated_grad)): #make use of noisy gradients
param.grad=grad/data.shape[0]
else : # no privately training
optimizer.zero_grad()
output = model(data)
train_loss = F.nll_loss(output, target)
train_loss.backward()
if(t%10==0):
check_norm(t, model, norm_list, train_loss.detach().numpy())
# norm_list.append(get_norm([para.grad for para in model.parameters()]))
# np.save('norm_list.npy', np.array(norm_list))
return total_privacy_l
def test(model, device, test_tuple, t):
model.eval()
test_loss = 0
correct = 0
data, target=test_tuple
with torch.no_grad():
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.max(1, keepdim=True)[1] # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_num=data.shape[0]
test_loss /= test_num
accuracy=100. * correct / test_num
print('At step %d: '%t)
print('\nTest loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, test_num,
accuracy))
return accuracy
def main(argv):
cuda = FLAGS.cuda and torch.cuda.is_available()
device = torch.device("cuda" if cuda and not FLAGS.convex else "cpu")
log_freq=20
total_steps=max(FLAGS.max_steps, int(FLAGS.epoches/FLAGS.q)) #total steps
#if(FLAGS.SGN==2):
# total_steps*=4
if(FLAGS.dataset=='mnist'):
train_tuple=MNIST_data().train()
test_tuple=MNIST_data().test()
if(FLAGS.convex):
train_tuple=(train_tuple[0].reshape(-1, 784), train_tuple[1])
test_tuple=(test_tuple[0].reshape(-1, 784), test_tuple[1])
model = Logistic(FLAGS.dataset).to(device)
else:
#train_tuple=(train_tuple[0].reshape(-1, 784), train_tuple[1])
#test_tuple=(test_tuple[0].reshape(-1, 784), test_tuple[1])
model = ConvNet().to(device)
elif(FLAGS.dataset=='covertype'):
train_tuple=Covertype_data.train()
test_tuple=Covertype_data.test()
if(FLAGS.convex):
model = Logistic(FLAGS.dataset).to(device)
else:
model = Linear().to(device)
if(FLAGS.momentum==0):
optimizer = optim.SGD(model.parameters(), lr=FLAGS.lr, momentum=0)
elif(FLAGS.momentum>0 and FLAGS.momentum<=1):
if(FLAGS.momentum==1):
FLAGS.momentum=0.5
optimizer = optim.SGD(model.parameters(), lr=FLAGS.lr, momentum=FLAGS.momentum)
else:
optimizer = optim.Adam(model.parameters())
if(FLAGS.delta==-1):
FLAGS.delta=1./(train_tuple[0].shape[0]**2)
diff=0
if(FLAGS.delta != 0 and FLAGS.epoches!=-1):
if(FLAGS.auto_sigma==0):
FLAGS.sigma=get_sigma(FLAGS.q, FLAGS.epoches, FLAGS.epsilon, FLAGS.delta)
elif(FLAGS.auto_sigma==1):
FLAGS.SGN=0
FLAGS.sigma=get_sigma(FLAGS.q, FLAGS.epoches, FLAGS.epsilon, FLAGS.delta)
FLAGS.sigma*=2
#FLAGS.sigma=20
#recording information of this experiment instance
experiment_info='Dataset: %r \nSampling probability: %r \nDelta: %r \nConvex: %r \nClip_bound: %r \nSigma: %r\nMomentum: %r\nAuto_sigma: %d\nSGN: %d \nEpoches: %d \nEpsilon: %r \n'%(FLAGS.dataset, FLAGS.q, FLAGS.delta,
FLAGS.convex, FLAGS.clip_bound, FLAGS.sigma, FLAGS.momentum, FLAGS.auto_sigma, FLAGS.SGN, FLAGS.epoches, FLAGS.epsilon)
logging(experiment_info, 'w')
total_privacy_l=[0.]*128 #tracking alpha at different orders [1,128], can be converted to (epsilon,delta)-differential privacy
epsilons=[0.5, 1., 2.0]
deltas=[0., 0., 2.0] #one delta for one epsilon
log_array=[]
norm_list=[]
for t in range(1, total_steps+1):
#print(FLAGS.sigma, 'here')
#get the gradients, notice the optimizer.step() is ran outside the train function.
total_privacy_l=train(model, device, train_tuple, optimizer, diff, total_privacy_l, t, norm_list)
if(FLAGS.delta>0 and FLAGS.delta<1): #training privately
all_failed=True
for i, eps in enumerate(epsilons):
if(deltas[i]>FLAGS.delta): #discarding the epsilon we already failed
continue
#use rdp_accountant to get delta for given epsilon
if_update_delta, order=_compute_delta(range(2,2+128), total_privacy_l, eps)
#print(if_update_delta, 'hereheee')
if(if_update_delta>FLAGS.delta): #record the final model satisfies (eps,deltas[i])-differential privacy
accuracy=test(model, device, test_tuple, t)
info='For epislon %r, delta %r we get accuracy: %r%% at step %r\n'%(eps, deltas[i], accuracy, t)
deltas[i]=1. #abort current epsilon
logging(info)
print(info)
else:
deltas[i]=if_update_delta #update delta
all_failed=False #still got at least one epsilon not failed
if(not all_failed):
optimizer.step()
else :
info='failed at all given epsilon, exiting\n'
print(info)
logging(info)
exit()
else: #training no privately
optimizer.step()
if(t%log_freq==0):
#aa=1
accuracy=test(model, device, test_tuple, t)
log_array.append(copy.deepcopy([t, accuracy, epsilons, deltas]))
np.save('logs/log%d.npy'%FLAGS.experiment_id, np.array(log_array, dtype=object))
#np.save('norm_list.npy', np.array(norm_list, dtype=object))
if __name__ == '__main__':
app.run(main) | [
"torch.mul",
"numpy.sqrt",
"numpy.random.rand",
"torch.sqrt",
"model.Logistic",
"numpy.array",
"torch.cuda.is_available",
"torch.normal",
"copy.deepcopy",
"load_data.Covertype_data.test",
"numpy.arange",
"absl.flags.DEFINE_float",
"torch.nn.functional.nll_loss",
"absl.app.run",
"get_step... | [((472, 559), 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""', '"""mnist"""', '"""which dataset to run, [mnist, covertype]"""'], {}), "('dataset', 'mnist',\n 'which dataset to run, [mnist, covertype]')\n", (491, 559), False, 'from absl import flags\n'), ((556, 622), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""max_steps"""', '(15000)', '"""maximum training steps"""'], {}), "('max_steps', 15000, 'maximum training steps')\n", (576, 622), False, 'from absl import flags\n'), ((623, 723), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""experiment_id"""', '(0)', '"""used to distinguish different instance of experiment"""'], {}), "('experiment_id', 0,\n 'used to distinguish different instance of experiment')\n", (643, 723), False, 'from absl import flags\n'), ((720, 768), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""q"""', '(0.01)', '"""samping ratio q"""'], {}), "('q', 0.01, 'samping ratio q')\n", (738, 768), False, 'from absl import flags\n'), ((769, 815), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""lr"""', '(0.1)', '"""learning rate"""'], {}), "('lr', 0.1, 'learning rate')\n", (787, 815), False, 'from absl import flags\n'), ((816, 892), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""momentum"""', '(0.6)', '"""0 for not using (0,1):momentum 2:Adam"""'], {}), "('momentum', 0.6, '0 for not using (0,1):momentum 2:Adam')\n", (834, 892), False, 'from absl import flags\n'), ((892, 977), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""clip_bound"""', '(12)', '"""the clipping bound of individual gradient"""'], {}), "('clip_bound', 12,\n 'the clipping bound of individual gradient')\n", (910, 977), False, 'from absl import flags\n'), ((974, 1034), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""convex"""', '(True)', '"""convex objective or not"""'], {}), "('convex', True, 'convex objective or not')\n", (991, 1034), False, 'from absl import flags\n'), ((1035, 1080), 
'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""sigma"""', '(-1)', '"""deprecated"""'], {}), "('sigma', -1, 'deprecated')\n", (1053, 1080), False, 'from absl import flags\n'), ((1081, 1134), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""init_sigma"""', '(-1)', '"""initial sigma"""'], {}), "('init_sigma', -1, 'initial sigma')\n", (1099, 1134), False, 'from absl import flags\n'), ((1135, 1186), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""epsilon"""', '(2)', '"""desired epsilon"""'], {}), "('epsilon', 2, 'desired epsilon')\n", (1153, 1186), False, 'from absl import flags\n'), ((1187, 1325), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""delta"""', '(-1)', '"""desired delta in (0,1) , other values mean no privately training. IF -1, will be set as 1/n**2"""'], {}), "('delta', -1,\n 'desired delta in (0,1) , other values mean no privately training. IF -1, will be set as 1/n**2'\n )\n", (1205, 1325), False, 'from absl import flags\n'), ((1317, 1366), 'absl.flags.DEFINE_bool', 'flags.DEFINE_bool', (['"""cuda"""', '(True)', '"""use gpu or not"""'], {}), "('cuda', True, 'use gpu or not')\n", (1334, 1366), False, 'from absl import flags\n'), ((1367, 1533), 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""SGN"""', '(2)', '"""using varying norm or not, 0 for not using, 1 for both decrease clip bound and sigma, 2 for decrease clip bound and fix sigma"""'], {}), "('SGN', 2,\n 'using varying norm or not, 0 for not using, 1 for both decrease clip bound and sigma, 2 for decrease clip bound and fix sigma'\n )\n", (1385, 1533), False, 'from absl import flags\n'), ((1525, 1635), 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""auto_sigma"""', '(0)', '"""0 for fixed sigma to run target epoched, 1 for fixed bigger sigma"""'], {}), "('auto_sigma', 0,\n '0 for fixed sigma to run target epoched, 1 for fixed bigger sigma')\n", (1545, 1635), False, 'from absl import flags\n'), ((1632, 1685), 'absl.flags.DEFINE_integer', 
'flags.DEFINE_integer', (['"""epoches"""', '(20)', '"""epoches to run"""'], {}), "('epoches', 20, 'epoches to run')\n", (1652, 1685), False, 'from absl import flags\n'), ((1903, 1924), 'torch.sqrt', 'torch.sqrt', (['para_norm'], {}), '(para_norm)\n', (1913, 1924), False, 'import torch\n'), ((2288, 2348), 'torch.device', 'torch.device', (["('cuda' if cuda and not FLAGS.convex else 'cpu')"], {}), "('cuda' if cuda and not FLAGS.convex else 'cpu')\n", (2300, 2348), False, 'import torch\n'), ((2906, 2923), 'numpy.random.rand', 'np.random.rand', (['n'], {}), '(n)\n', (2920, 2923), True, 'import numpy as np\n'), ((3197, 3210), 'numpy.sqrt', 'np.sqrt', (['norm'], {}), '(norm)\n', (3204, 3210), True, 'import numpy as np\n'), ((8165, 8225), 'torch.device', 'torch.device', (["('cuda' if cuda and not FLAGS.convex else 'cpu')"], {}), "('cuda' if cuda and not FLAGS.convex else 'cpu')\n", (8177, 8225), False, 'import torch\n'), ((12701, 12714), 'absl.app.run', 'app.run', (['main'], {}), '(main)\n', (12708, 12714), False, 'from absl import app\n'), ((2249, 2274), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2272, 2274), False, 'import torch\n'), ((2960, 2975), 'numpy.arange', 'np.arange', (['(0)', 'n'], {}), '(0, n)\n', (2969, 2975), True, 'import numpy as np\n'), ((7011, 7037), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {}), '(output, target)\n', (7021, 7037), True, 'import torch.nn.functional as F\n'), ((7469, 7484), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (7482, 7484), False, 'import torch\n'), ((8126, 8151), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8149, 8151), False, 'import torch\n'), ((1857, 1888), 'torch.mul', 'torch.mul', (['para.grad', 'para.grad'], {}), '(para.grad, para.grad)\n', (1866, 1888), False, 'import torch\n'), ((2020, 2064), 'torch.div', 'torch.div', (['para.grad', '(para_norm / clip_bound)'], {}), '(para.grad, para_norm / clip_bound)\n', (2029, 2064), False, 
'import torch\n'), ((2571, 2597), 'torch.normal', 'torch.normal', (['mean', 'stddev'], {}), '(mean, stddev)\n', (2583, 2597), False, 'import torch\n'), ((3164, 3185), 'torch.mul', 'torch.mul', (['grad', 'grad'], {}), '(grad, grad)\n', (3173, 3185), False, 'import torch\n'), ((6591, 6626), 'rdp_accountant._compute_rdp', '_compute_rdp', (['FLAGS.q', 'sigma', 'order'], {}), '(FLAGS.q, sigma, order)\n', (6603, 6626), False, 'from rdp_accountant import _compute_rdp, _compute_delta\n'), ((8968, 8990), 'load_data.Covertype_data.train', 'Covertype_data.train', ([], {}), '()\n', (8988, 8990), False, 'from load_data import MNIST_data, Covertype_data\n'), ((9010, 9031), 'load_data.Covertype_data.test', 'Covertype_data.test', ([], {}), '()\n', (9029, 9031), False, 'from load_data import MNIST_data, Covertype_data\n'), ((9726, 9787), 'get_steps.get_sigma', 'get_sigma', (['FLAGS.q', 'FLAGS.epoches', 'FLAGS.epsilon', 'FLAGS.delta'], {}), '(FLAGS.q, FLAGS.epoches, FLAGS.epsilon, FLAGS.delta)\n', (9735, 9787), False, 'from get_steps import get_sigma\n'), ((2403, 2425), 'torch.zeros_like', 'torch.zeros_like', (['grad'], {}), '(grad)\n', (2419, 2425), False, 'import torch\n'), ((7594, 7637), 'torch.nn.functional.nll_loss', 'F.nll_loss', (['output', 'target'], {'reduction': '"""sum"""'}), "(output, target, reduction='sum')\n", (7604, 7637), True, 'import torch.nn.functional as F\n'), ((8422, 8434), 'load_data.MNIST_data', 'MNIST_data', ([], {}), '()\n', (8432, 8434), False, 'from load_data import MNIST_data, Covertype_data\n'), ((8462, 8474), 'load_data.MNIST_data', 'MNIST_data', ([], {}), '()\n', (8472, 8474), False, 'from load_data import MNIST_data, Covertype_data\n'), ((9873, 9934), 'get_steps.get_sigma', 'get_sigma', (['FLAGS.q', 'FLAGS.epoches', 'FLAGS.epsilon', 'FLAGS.delta'], {}), '(FLAGS.q, FLAGS.epoches, FLAGS.epsilon, FLAGS.delta)\n', (9882, 9934), False, 'from get_steps import get_sigma\n'), ((12461, 12507), 'copy.deepcopy', 'copy.deepcopy', (['[t, accuracy, epsilons, 
deltas]'], {}), '([t, accuracy, epsilons, deltas])\n', (12474, 12507), False, 'import copy\n'), ((12567, 12600), 'numpy.array', 'np.array', (['log_array'], {'dtype': 'object'}), '(log_array, dtype=object)\n', (12575, 12600), True, 'import numpy as np\n'), ((2462, 2484), 'torch.zeros_like', 'torch.zeros_like', (['grad'], {}), '(grad)\n', (2478, 2484), False, 'import torch\n'), ((8673, 8696), 'model.Logistic', 'Logistic', (['FLAGS.dataset'], {}), '(FLAGS.dataset)\n', (8681, 8696), False, 'from model import ConvNet, Linear, Logistic, LinearMnist, ConvNetTest\n'), ((8889, 8898), 'model.ConvNet', 'ConvNet', ([], {}), '()\n', (8896, 8898), False, 'from model import ConvNet, Linear, Logistic, LinearMnist, ConvNetTest\n'), ((9078, 9101), 'model.Logistic', 'Logistic', (['FLAGS.dataset'], {}), '(FLAGS.dataset)\n', (9086, 9101), False, 'from model import ConvNet, Linear, Logistic, LinearMnist, ConvNetTest\n'), ((9147, 9155), 'model.Linear', 'Linear', ([], {}), '()\n', (9153, 9155), False, 'from model import ConvNet, Linear, Logistic, LinearMnist, ConvNetTest\n')] |
import numpy as np
import pytest
import pandas as pd
from pandas.core.sorting import nargsort
import pandas.util.testing as tm
from .base import BaseExtensionTests
class BaseMethodsTests(BaseExtensionTests):
    """Various Series and DataFrame methods.

    Shared test suite for pandas ExtensionArray implementations: each
    fixture (``data``, ``data_missing``, ``data_for_sorting``, ...) is
    supplied by the concrete subclass for a specific extension dtype.
    """
    # value_counts must agree with the result computed on a plain array.
    @pytest.mark.parametrize('dropna', [True, False])
    def test_value_counts(self, all_data, dropna):
        all_data = all_data[:10]
        if dropna:
            other = np.array(all_data[~all_data.isna()])
        else:
            other = all_data
        result = pd.Series(all_data).value_counts(dropna=dropna).sort_index()
        expected = pd.Series(other).value_counts(
            dropna=dropna).sort_index()
        self.assert_series_equal(result, expected)
    # DataFrame.count across columns: NA rows count 0, valid rows count 1.
    def test_count(self, data_missing):
        df = pd.DataFrame({"A": data_missing})
        result = df.count(axis='columns')
        expected = pd.Series([0, 1])
        self.assert_series_equal(result, expected)
    def test_series_count(self, data_missing):
        # GH#26835
        ser = pd.Series(data_missing)
        result = ser.count()
        expected = 1
        assert result == expected
    # Series.apply over an extension array should still yield a Series.
    def test_apply_simple_series(self, data):
        result = pd.Series(data).apply(id)
        assert isinstance(result, pd.Series)
    # data_for_sorting is ordered B < C < A, hence the [2, 0, 1] expectation.
    def test_argsort(self, data_for_sorting):
        result = pd.Series(data_for_sorting).argsort()
        expected = pd.Series(np.array([2, 0, 1], dtype=np.int64))
        self.assert_series_equal(result, expected)
    def test_argsort_missing(self, data_missing_for_sorting):
        result = pd.Series(data_missing_for_sorting).argsort()
        expected = pd.Series(np.array([1, -1, 0], dtype=np.int64))
        self.assert_series_equal(result, expected)
    @pytest.mark.parametrize('na_position, expected', [
        ('last', np.array([2, 0, 1], dtype=np.dtype('intp'))),
        ('first', np.array([1, 2, 0], dtype=np.dtype('intp')))
    ])
    def test_nargsort(self, data_missing_for_sorting, na_position, expected):
        # GH 25439
        result = nargsort(data_missing_for_sorting, na_position=na_position)
        tm.assert_numpy_array_equal(result, expected)
    @pytest.mark.parametrize('ascending', [True, False])
    def test_sort_values(self, data_for_sorting, ascending):
        ser = pd.Series(data_for_sorting)
        result = ser.sort_values(ascending=ascending)
        expected = ser.iloc[[2, 0, 1]]
        if not ascending:
            expected = expected[::-1]
        self.assert_series_equal(result, expected)
    @pytest.mark.parametrize('ascending', [True, False])
    def test_sort_values_missing(self, data_missing_for_sorting, ascending):
        ser = pd.Series(data_missing_for_sorting)
        result = ser.sort_values(ascending=ascending)
        # NA stays last in both directions (default na_position='last').
        if ascending:
            expected = ser.iloc[[2, 0, 1]]
        else:
            expected = ser.iloc[[0, 2, 1]]
        self.assert_series_equal(result, expected)
    @pytest.mark.parametrize('ascending', [True, False])
    def test_sort_values_frame(self, data_for_sorting, ascending):
        df = pd.DataFrame({"A": [1, 2, 1],
                           "B": data_for_sorting})
        result = df.sort_values(['A', 'B'])
        expected = pd.DataFrame({"A": [1, 1, 2],
                           'B': data_for_sorting.take([2, 0, 1])},
                          index=[2, 0, 1])
        self.assert_frame_equal(result, expected)
    # unique() must work both on the raw array and wrapped in a Series,
    # via the method and via the top-level pd.unique function.
    @pytest.mark.parametrize('box', [pd.Series, lambda x: x])
    @pytest.mark.parametrize('method', [lambda x: x.unique(), pd.unique])
    def test_unique(self, data, box, method):
        duplicated = box(data._from_sequence([data[0], data[0]]))
        result = method(duplicated)
        assert len(result) == 1
        assert isinstance(result, type(data))
        assert result[0] == duplicated[0]
    # data_for_grouping layout is [B, B, NA, NA, A, A, B, C].
    @pytest.mark.parametrize('na_sentinel', [-1, -2])
    def test_factorize(self, data_for_grouping, na_sentinel):
        labels, uniques = pd.factorize(data_for_grouping,
                                       na_sentinel=na_sentinel)
        expected_labels = np.array([0, 0, na_sentinel,
                                    na_sentinel, 1, 1, 0, 2],
                                   dtype=np.intp)
        expected_uniques = data_for_grouping.take([0, 4, 7])
        tm.assert_numpy_array_equal(labels, expected_labels)
        self.assert_extension_array_equal(uniques, expected_uniques)
    # pd.factorize and ExtensionArray.factorize must agree.
    @pytest.mark.parametrize('na_sentinel', [-1, -2])
    def test_factorize_equivalence(self, data_for_grouping, na_sentinel):
        l1, u1 = pd.factorize(data_for_grouping, na_sentinel=na_sentinel)
        l2, u2 = data_for_grouping.factorize(na_sentinel=na_sentinel)
        tm.assert_numpy_array_equal(l1, l2)
        self.assert_extension_array_equal(u1, u2)
    def test_factorize_empty(self, data):
        labels, uniques = pd.factorize(data[:0])
        expected_labels = np.array([], dtype=np.intp)
        expected_uniques = type(data)._from_sequence([], dtype=data[:0].dtype)
        tm.assert_numpy_array_equal(labels, expected_labels)
        self.assert_extension_array_equal(uniques, expected_uniques)
    # fillna on a frame must not mutate the original backing values.
    def test_fillna_copy_frame(self, data_missing):
        arr = data_missing.take([1, 1])
        df = pd.DataFrame({"A": arr})
        filled_val = df.iloc[0, 0]
        result = df.fillna(filled_val)
        assert df.A.values is not result.A.values
    def test_fillna_copy_series(self, data_missing):
        arr = data_missing.take([1, 1])
        ser = pd.Series(arr)
        filled_val = ser[0]
        result = ser.fillna(filled_val)
        assert ser._values is not result._values
        assert ser._values is arr
    def test_fillna_length_mismatch(self, data_missing):
        msg = "Length of 'value' does not match."
        with pytest.raises(ValueError, match=msg):
            data_missing.fillna(data_missing.take([1]))
    def test_combine_le(self, data_repeated):
        # GH 20825
        # Test that combine works when doing a <= (le) comparison
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 <= x2)
        expected = pd.Series([a <= b for (a, b) in
                               zip(list(orig_data1), list(orig_data2))])
        self.assert_series_equal(result, expected)
        # Scalar other: compare every element against one value.
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 <= x2)
        expected = pd.Series([a <= val for a in list(orig_data1)])
        self.assert_series_equal(result, expected)
    def test_combine_add(self, data_repeated):
        # GH 20825
        orig_data1, orig_data2 = data_repeated(2)
        s1 = pd.Series(orig_data1)
        s2 = pd.Series(orig_data2)
        result = s1.combine(s2, lambda x1, x2: x1 + x2)
        # Overflow is acceptable for bounded integer dtypes; silence it.
        with np.errstate(over='ignore'):
            expected = pd.Series(
                orig_data1._from_sequence([a + b for (a, b) in
                                           zip(list(orig_data1),
                                               list(orig_data2))]))
        self.assert_series_equal(result, expected)
        val = s1.iloc[0]
        result = s1.combine(val, lambda x1, x2: x1 + x2)
        expected = pd.Series(
            orig_data1._from_sequence([a + val for a in list(orig_data1)]))
        self.assert_series_equal(result, expected)
    def test_combine_first(self, data):
        # https://github.com/pandas-dev/pandas/issues/24147
        a = pd.Series(data[:3])
        b = pd.Series(data[2:5], index=[2, 3, 4])
        result = a.combine_first(b)
        expected = pd.Series(data[:5])
        self.assert_series_equal(result, expected)
    @pytest.mark.parametrize('frame', [True, False])
    @pytest.mark.parametrize('periods, indices', [
        (-2, [2, 3, 4, -1, -1]),
        (0, [0, 1, 2, 3, 4]),
        (2, [-1, -1, 0, 1, 2]),
    ])
    def test_container_shift(self, data, frame, periods, indices):
        # https://github.com/pandas-dev/pandas/issues/22386
        subset = data[:5]
        data = pd.Series(subset, name='A')
        expected = pd.Series(subset.take(indices, allow_fill=True), name='A')
        if frame:
            result = data.to_frame(name='A').assign(B=1).shift(periods)
            expected = pd.concat([
                expected,
                pd.Series([1] * 5, name='B').shift(periods)
            ], axis=1)
            compare = self.assert_frame_equal
        else:
            result = data.shift(periods)
            compare = self.assert_series_equal
        compare(result, expected)
    @pytest.mark.parametrize('periods, indices', [
        [-4, [-1, -1]],
        [-1, [1, -1]],
        [0, [0, 1]],
        [1, [-1, 0]],
        [4, [-1, -1]]
    ])
    def test_shift_non_empty_array(self, data, periods, indices):
        # https://github.com/pandas-dev/pandas/issues/23911
        subset = data[:2]
        result = subset.shift(periods)
        expected = subset.take(indices, allow_fill=True)
        self.assert_extension_array_equal(result, expected)
    @pytest.mark.parametrize('periods', [
        -4, -1, 0, 1, 4
    ])
    def test_shift_empty_array(self, data, periods):
        # https://github.com/pandas-dev/pandas/issues/23911
        empty = data[:0]
        result = empty.shift(periods)
        expected = empty
        self.assert_extension_array_equal(result, expected)
    def test_shift_fill_value(self, data):
        arr = data[:4]
        fill_value = data[0]
        result = arr.shift(1, fill_value=fill_value)
        expected = data.take([0, 0, 1, 2])
        self.assert_extension_array_equal(result, expected)
        result = arr.shift(-2, fill_value=fill_value)
        expected = data.take([2, 3, 0, 0])
        self.assert_extension_array_equal(result, expected)
    # hash_pandas_object must be deterministic for extension-backed objects.
    def test_hash_pandas_object_works(self, data, as_frame):
        # https://github.com/pandas-dev/pandas/issues/23066
        data = pd.Series(data)
        if as_frame:
            data = data.to_frame()
        a = pd.util.hash_pandas_object(data)
        b = pd.util.hash_pandas_object(data)
        self.assert_equal(a, b)
    def test_searchsorted(self, data_for_sorting, as_series):
        # data_for_sorting yields (b, c, a) with a < b < c.
        b, c, a = data_for_sorting
        arr = type(data_for_sorting)._from_sequence([a, b, c])
        if as_series:
            arr = pd.Series(arr)
        assert arr.searchsorted(a) == 0
        assert arr.searchsorted(a, side="right") == 1
        assert arr.searchsorted(b) == 1
        assert arr.searchsorted(b, side="right") == 2
        assert arr.searchsorted(c) == 2
        assert arr.searchsorted(c, side="right") == 3
        result = arr.searchsorted(arr.take([0, 2]))
        expected = np.array([0, 2], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)
        # sorter
        sorter = np.array([1, 2, 0])
        assert data_for_sorting.searchsorted(a, sorter=sorter) == 0
    def test_where_series(self, data, na_value, as_frame):
        assert data[0] != data[1]
        cls = type(data)
        a, b = data[:2]
        ser = pd.Series(cls._from_sequence([a, a, b, b], dtype=data.dtype))
        cond = np.array([True, True, False, False])
        if as_frame:
            ser = ser.to_frame(name='a')
            cond = cond.reshape(-1, 1)
        result = ser.where(cond)
        expected = pd.Series(cls._from_sequence([a, a, na_value, na_value],
                                      dtype=data.dtype))
        if as_frame:
            expected = expected.to_frame(name='a')
        self.assert_equal(result, expected)
        # array other
        cond = np.array([True, False, True, True])
        other = cls._from_sequence([a, b, a, b], dtype=data.dtype)
        if as_frame:
            other = pd.DataFrame({"a": other})
            cond = pd.DataFrame({"a": cond})
        result = ser.where(cond, other)
        expected = pd.Series(cls._from_sequence([a, b, b, b],
                                      dtype=data.dtype))
        if as_frame:
            expected = expected.to_frame(name='a')
        self.assert_equal(result, expected)
    @pytest.mark.parametrize("repeats", [0, 1, 2, [1, 2, 3]])
    def test_repeat(self, data, repeats, as_series, use_numpy):
        arr = type(data)._from_sequence(data[:3], dtype=data.dtype)
        if as_series:
            arr = pd.Series(arr)
        result = np.repeat(arr, repeats) if use_numpy else arr.repeat(repeats)
        repeats = [repeats] * 3 if isinstance(repeats, int) else repeats
        expected = [x for x, n in zip(arr, repeats) for _ in range(n)]
        expected = type(data)._from_sequence(expected, dtype=data.dtype)
        if as_series:
            expected = pd.Series(expected, index=arr.index.repeat(repeats))
        self.assert_equal(result, expected)
    @pytest.mark.parametrize('repeats, kwargs, error, msg', [
        (2, dict(axis=1), ValueError, "'axis"),
        (-1, dict(), ValueError, "negative"),
        ([1, 2], dict(), ValueError, "shape"),
        (2, dict(foo='bar'), TypeError, "'foo'")])
    def test_repeat_raises(self, data, repeats, kwargs, error, msg, use_numpy):
        with pytest.raises(error, match=msg):
            if use_numpy:
                np.repeat(data, repeats, **kwargs)
            else:
                data.repeat(repeats, **kwargs)
| [
"pandas.Series",
"pandas.util.testing.assert_numpy_array_equal",
"pandas.factorize",
"numpy.repeat",
"pandas.util.hash_pandas_object",
"pytest.mark.parametrize",
"numpy.array",
"numpy.errstate",
"pandas.core.sorting.nargsort",
"pytest.raises",
"pandas.DataFrame",
"numpy.dtype"
] | [((266, 314), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dropna"""', '[True, False]'], {}), "('dropna', [True, False])\n", (289, 314), False, 'import pytest\n'), ((2168, 2219), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', '[True, False]'], {}), "('ascending', [True, False])\n", (2191, 2219), False, 'import pytest\n'), ((2538, 2589), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', '[True, False]'], {}), "('ascending', [True, False])\n", (2561, 2589), False, 'import pytest\n'), ((2950, 3001), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""ascending"""', '[True, False]'], {}), "('ascending', [True, False])\n", (2973, 3001), False, 'import pytest\n'), ((3434, 3490), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""box"""', '[pd.Series, lambda x: x]'], {}), "('box', [pd.Series, lambda x: x])\n", (3457, 3490), False, 'import pytest\n'), ((3841, 3889), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_sentinel"""', '[-1, -2]'], {}), "('na_sentinel', [-1, -2])\n", (3864, 3889), False, 'import pytest\n'), ((4438, 4486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""na_sentinel"""', '[-1, -2]'], {}), "('na_sentinel', [-1, -2])\n", (4461, 4486), False, 'import pytest\n'), ((7709, 7756), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""frame"""', '[True, False]'], {}), "('frame', [True, False])\n", (7732, 7756), False, 'import pytest\n'), ((7762, 7883), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""periods, indices"""', '[(-2, [2, 3, 4, -1, -1]), (0, [0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])]'], {}), "('periods, indices', [(-2, [2, 3, 4, -1, -1]), (0, [\n 0, 1, 2, 3, 4]), (2, [-1, -1, 0, 1, 2])])\n", (7785, 7883), False, 'import pytest\n'), ((8608, 8730), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""periods, indices"""', '[[-4, [-1, -1]], [-1, [1, -1]], [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]]'], {}), 
"('periods, indices', [[-4, [-1, -1]], [-1, [1, -1]],\n [0, [0, 1]], [1, [-1, 0]], [4, [-1, -1]]])\n", (8631, 8730), False, 'import pytest\n'), ((9087, 9140), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""periods"""', '[-4, -1, 0, 1, 4]'], {}), "('periods', [-4, -1, 0, 1, 4])\n", (9110, 9140), False, 'import pytest\n'), ((12153, 12209), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""repeats"""', '[0, 1, 2, [1, 2, 3]]'], {}), "('repeats', [0, 1, 2, [1, 2, 3]])\n", (12176, 12209), False, 'import pytest\n'), ((793, 826), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': data_missing}"], {}), "({'A': data_missing})\n", (805, 826), True, 'import pandas as pd\n'), ((888, 905), 'pandas.Series', 'pd.Series', (['[0, 1]'], {}), '([0, 1])\n', (897, 905), True, 'import pandas as pd\n'), ((1038, 1061), 'pandas.Series', 'pd.Series', (['data_missing'], {}), '(data_missing)\n', (1047, 1061), True, 'import pandas as pd\n'), ((2048, 2107), 'pandas.core.sorting.nargsort', 'nargsort', (['data_missing_for_sorting'], {'na_position': 'na_position'}), '(data_missing_for_sorting, na_position=na_position)\n', (2056, 2107), False, 'from pandas.core.sorting import nargsort\n'), ((2116, 2161), 'pandas.util.testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result', 'expected'], {}), '(result, expected)\n', (2143, 2161), True, 'import pandas.util.testing as tm\n'), ((2295, 2322), 'pandas.Series', 'pd.Series', (['data_for_sorting'], {}), '(data_for_sorting)\n', (2304, 2322), True, 'import pandas as pd\n'), ((2681, 2716), 'pandas.Series', 'pd.Series', (['data_missing_for_sorting'], {}), '(data_missing_for_sorting)\n', (2690, 2716), True, 'import pandas as pd\n'), ((3082, 3135), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': [1, 2, 1], 'B': data_for_sorting}"], {}), "({'A': [1, 2, 1], 'B': data_for_sorting})\n", (3094, 3135), True, 'import pandas as pd\n'), ((3978, 4034), 'pandas.factorize', 'pd.factorize', (['data_for_grouping'], {'na_sentinel': 
'na_sentinel'}), '(data_for_grouping, na_sentinel=na_sentinel)\n', (3990, 4034), True, 'import pandas as pd\n'), ((4100, 4169), 'numpy.array', 'np.array', (['[0, 0, na_sentinel, na_sentinel, 1, 1, 0, 2]'], {'dtype': 'np.intp'}), '([0, 0, na_sentinel, na_sentinel, 1, 1, 0, 2], dtype=np.intp)\n', (4108, 4169), True, 'import numpy as np\n'), ((4310, 4362), 'pandas.util.testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['labels', 'expected_labels'], {}), '(labels, expected_labels)\n', (4337, 4362), True, 'import pandas.util.testing as tm\n'), ((4578, 4634), 'pandas.factorize', 'pd.factorize', (['data_for_grouping'], {'na_sentinel': 'na_sentinel'}), '(data_for_grouping, na_sentinel=na_sentinel)\n', (4590, 4634), True, 'import pandas as pd\n'), ((4714, 4749), 'pandas.util.testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['l1', 'l2'], {}), '(l1, l2)\n', (4741, 4749), True, 'import pandas.util.testing as tm\n'), ((4869, 4891), 'pandas.factorize', 'pd.factorize', (['data[:0]'], {}), '(data[:0])\n', (4881, 4891), True, 'import pandas as pd\n'), ((4918, 4945), 'numpy.array', 'np.array', (['[]'], {'dtype': 'np.intp'}), '([], dtype=np.intp)\n', (4926, 4945), True, 'import numpy as np\n'), ((5034, 5086), 'pandas.util.testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['labels', 'expected_labels'], {}), '(labels, expected_labels)\n', (5061, 5086), True, 'import pandas.util.testing as tm\n'), ((5262, 5286), 'pandas.DataFrame', 'pd.DataFrame', (["{'A': arr}"], {}), "({'A': arr})\n", (5274, 5286), True, 'import pandas as pd\n'), ((5521, 5535), 'pandas.Series', 'pd.Series', (['arr'], {}), '(arr)\n', (5530, 5535), True, 'import pandas as pd\n'), ((6099, 6120), 'pandas.Series', 'pd.Series', (['orig_data1'], {}), '(orig_data1)\n', (6108, 6120), True, 'import pandas as pd\n'), ((6134, 6155), 'pandas.Series', 'pd.Series', (['orig_data2'], {}), '(orig_data2)\n', (6143, 6155), True, 'import pandas as pd\n'), ((6719, 6740), 'pandas.Series', 
'pd.Series', (['orig_data1'], {}), '(orig_data1)\n', (6728, 6740), True, 'import pandas as pd\n'), ((6754, 6775), 'pandas.Series', 'pd.Series', (['orig_data2'], {}), '(orig_data2)\n', (6763, 6775), True, 'import pandas as pd\n'), ((7507, 7526), 'pandas.Series', 'pd.Series', (['data[:3]'], {}), '(data[:3])\n', (7516, 7526), True, 'import pandas as pd\n'), ((7539, 7576), 'pandas.Series', 'pd.Series', (['data[2:5]'], {'index': '[2, 3, 4]'}), '(data[2:5], index=[2, 3, 4])\n', (7548, 7576), True, 'import pandas as pd\n'), ((7632, 7651), 'pandas.Series', 'pd.Series', (['data[:5]'], {}), '(data[:5])\n', (7641, 7651), True, 'import pandas as pd\n'), ((8078, 8105), 'pandas.Series', 'pd.Series', (['subset'], {'name': '"""A"""'}), "(subset, name='A')\n", (8087, 8105), True, 'import pandas as pd\n'), ((9963, 9978), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (9972, 9978), True, 'import pandas as pd\n'), ((10047, 10079), 'pandas.util.hash_pandas_object', 'pd.util.hash_pandas_object', (['data'], {}), '(data)\n', (10073, 10079), True, 'import pandas as pd\n'), ((10092, 10124), 'pandas.util.hash_pandas_object', 'pd.util.hash_pandas_object', (['data'], {}), '(data)\n', (10118, 10124), True, 'import pandas as pd\n'), ((10730, 10761), 'numpy.array', 'np.array', (['[0, 2]'], {'dtype': 'np.intp'}), '([0, 2], dtype=np.intp)\n', (10738, 10761), True, 'import numpy as np\n'), ((10771, 10816), 'pandas.util.testing.assert_numpy_array_equal', 'tm.assert_numpy_array_equal', (['result', 'expected'], {}), '(result, expected)\n', (10798, 10816), True, 'import pandas.util.testing as tm\n'), ((10852, 10871), 'numpy.array', 'np.array', (['[1, 2, 0]'], {}), '([1, 2, 0])\n', (10860, 10871), True, 'import numpy as np\n'), ((11175, 11211), 'numpy.array', 'np.array', (['[True, True, False, False]'], {}), '([True, True, False, False])\n', (11183, 11211), True, 'import numpy as np\n'), ((11646, 11681), 'numpy.array', 'np.array', (['[True, False, True, True]'], {}), '([True, False, True, 
True])\n', (11654, 11681), True, 'import numpy as np\n'), ((1412, 1447), 'numpy.array', 'np.array', (['[2, 0, 1]'], {'dtype': 'np.int64'}), '([2, 0, 1], dtype=np.int64)\n', (1420, 1447), True, 'import numpy as np\n'), ((1655, 1691), 'numpy.array', 'np.array', (['[1, -1, 0]'], {'dtype': 'np.int64'}), '([1, -1, 0], dtype=np.int64)\n', (1663, 1691), True, 'import numpy as np\n'), ((5810, 5846), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'msg'}), '(ValueError, match=msg)\n', (5823, 5846), False, 'import pytest\n'), ((6845, 6871), 'numpy.errstate', 'np.errstate', ([], {'over': '"""ignore"""'}), "(over='ignore')\n", (6856, 6871), True, 'import numpy as np\n'), ((10359, 10373), 'pandas.Series', 'pd.Series', (['arr'], {}), '(arr)\n', (10368, 10373), True, 'import pandas as pd\n'), ((11790, 11816), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': other}"], {}), "({'a': other})\n", (11802, 11816), True, 'import pandas as pd\n'), ((11836, 11861), 'pandas.DataFrame', 'pd.DataFrame', (["{'a': cond}"], {}), "({'a': cond})\n", (11848, 11861), True, 'import pandas as pd\n'), ((12382, 12396), 'pandas.Series', 'pd.Series', (['arr'], {}), '(arr)\n', (12391, 12396), True, 'import pandas as pd\n'), ((12415, 12438), 'numpy.repeat', 'np.repeat', (['arr', 'repeats'], {}), '(arr, repeats)\n', (12424, 12438), True, 'import numpy as np\n'), ((13186, 13217), 'pytest.raises', 'pytest.raises', (['error'], {'match': 'msg'}), '(error, match=msg)\n', (13199, 13217), False, 'import pytest\n'), ((1210, 1225), 'pandas.Series', 'pd.Series', (['data'], {}), '(data)\n', (1219, 1225), True, 'import pandas as pd\n'), ((1345, 1372), 'pandas.Series', 'pd.Series', (['data_for_sorting'], {}), '(data_for_sorting)\n', (1354, 1372), True, 'import pandas as pd\n'), ((1580, 1615), 'pandas.Series', 'pd.Series', (['data_missing_for_sorting'], {}), '(data_missing_for_sorting)\n', (1589, 1615), True, 'import pandas as pd\n'), ((13261, 13295), 'numpy.repeat', 'np.repeat', (['data', 'repeats'], {}), 
'(data, repeats, **kwargs)\n', (13270, 13295), True, 'import numpy as np\n'), ((536, 555), 'pandas.Series', 'pd.Series', (['all_data'], {}), '(all_data)\n', (545, 555), True, 'import pandas as pd\n'), ((616, 632), 'pandas.Series', 'pd.Series', (['other'], {}), '(other)\n', (625, 632), True, 'import pandas as pd\n'), ((1844, 1860), 'numpy.dtype', 'np.dtype', (['"""intp"""'], {}), "('intp')\n", (1852, 1860), True, 'import numpy as np\n'), ((1908, 1924), 'numpy.dtype', 'np.dtype', (['"""intp"""'], {}), "('intp')\n", (1916, 1924), True, 'import numpy as np\n'), ((8352, 8380), 'pandas.Series', 'pd.Series', (['([1] * 5)'], {'name': '"""B"""'}), "([1] * 5, name='B')\n", (8361, 8380), True, 'import pandas as pd\n')] |
# Tang-poem-style text generation
import collections
import os
import sys
import time
import numpy as np
import tensorflow as tf
# NOTE: this import may fail depending on how the package is run
from models.model import rnn_model
# sentence preprocessing and batch-generation helpers
from dataset.fiction import process_poems, generate_batch
import heapq
# the last argument is the flag's help text
# Command-line hyper-parameter and path flags (TF1 tf.flags interface).
tf.flags.DEFINE_integer('batch_size', 64, 'batch size.')
tf.flags.DEFINE_float('learning_rate', 0.01, 'learning rate.')
# set this to 'main.py' relative path
tf.flags.DEFINE_string('checkpoints_dir', os.path.abspath('./checkpoints/zhetian/'), 'checkpoints save path.')
tf.flags.DEFINE_string('file_path', os.path.abspath('./dataset/data/zhetian.txt'), 'file name of poems.')
tf.flags.DEFINE_string('model_prefix', 'poems', 'model save prefix.')
tf.flags.DEFINE_integer('epochs', 50, 'train how many epochs.')
tf.flags.DEFINE_string('write', '', 'wtf.')
tf.flags.DEFINE_string('train', '', 'wtf.')
tf.flags.DEFINE_string('no-train', '', 'wtf.')
FLAGS = tf.flags.FLAGS
# FLAGS._parse_flags()
# Start and end sentinel characters wrapped around every training sample.
start_token = 'G'
end_token = 'E'
'''
运行训练,核心
'''
def run_training():
    """Train the LSTM language model and periodically save checkpoints.

    Reads the corpus from FLAGS.file_path, builds the rnn_model graph and
    runs FLAGS.epochs passes over the data.  Training resumes from the
    latest checkpoint in FLAGS.checkpoints_dir when one exists; on
    KeyboardInterrupt the current state is saved before returning.
    """
    # Make sure the checkpoint directory (and its parent) exist.
    print('its_not_ok:', FLAGS.checkpoints_dir)
    if not os.path.exists(os.path.dirname(FLAGS.checkpoints_dir)):
        os.mkdir(os.path.dirname(FLAGS.checkpoints_dir))
    if not os.path.exists(FLAGS.checkpoints_dir):
        os.mkdir(FLAGS.checkpoints_dir)
    # Preprocess the corpus: returns the text encoded as int vectors,
    # the char -> int mapping, and the vocabulary.
    poems_vector, word_to_int, vocabularies = process_poems(FLAGS.file_path)
    # Slice the encoded corpus into (input, target) training batches.
    batches_inputs, batches_outputs = generate_batch(FLAGS.batch_size, poems_vector, word_to_int)
    # Placeholders for one batch of input ids and shifted target ids.
    input_data = tf.placeholder(tf.int32, [FLAGS.batch_size, None])
    output_targets = tf.placeholder(tf.int32, [FLAGS.batch_size, None])
    end_points = rnn_model(model='lstm', input_data=input_data, output_data=output_targets, vocab_size=len(
        vocabularies), run_size=128, num_layers=2, batch_size=FLAGS.batch_size, learning_rate=FLAGS.learning_rate)
    # Saver over every graph variable, for checkpointing.
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        start_epoch = 0
        checkpoint = tf.train.latest_checkpoint(FLAGS.checkpoints_dir)
        if checkpoint:
            saver.restore(sess, checkpoint)
            print("[INFO] restore from the checkpoint {0}".format(checkpoint))
            # Checkpoint names end in '-<epoch>': resume from there.
            start_epoch += int(checkpoint.split('-')[-1])
        print('[INFO] start training...',time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
        try:
            for epoch in range(start_epoch, FLAGS.epochs):
                n = 0
                n_chunk = len(poems_vector) // FLAGS.batch_size
                for batch in range(n_chunk):
                    loss, _, _ = sess.run([
                        end_points['total_loss'],
                        end_points['last_state'],
                        end_points['train_op']
                    ], feed_dict={input_data:batches_inputs[n], output_targets:batches_outputs[n]})
                    n += 1
                    print('[INFO] Epoch: %d, batch: %d, training loss: %.6f' % (epoch,batch, loss))
                # Save a checkpoint every 6 epochs.
                # NOTE(review): training saves to './zhetian_model/' while
                # restore above reads FLAGS.checkpoints_dir — confirm intent.
                if epoch % 6 == 0:
                    saver.save(sess, './zhetian_model/', global_step=epoch)
        except KeyboardInterrupt:
            print('[INFO] Interrupt manually, try saving checkpoint for now ..')
            saver.save(sess, os.path.join(FLAGS.checkpoints_dir, FLAGS.model_prefix), global_step=epoch)
            print('[INFO] Last epoch were saved, next time will start from epoch {}.'.format(epoch))
def to_word(predict, vocabs):
    """Sample one word from the probability distribution ``predict``.

    Draws a uniform random number scaled by the distribution's mass and
    locates it in the cumulative sum (inverse-CDF sampling).

    Args:
        predict: 1-D array-like of non-negative weights, one per word.
        vocabs: sequence of words; may be shorter than ``predict``.

    Returns:
        The sampled word from ``vocabs``.
    """
    cumulative = np.cumsum(predict)
    total = np.sum(predict)
    sample = int(np.searchsorted(cumulative, np.random.rand(1) * total))
    # Bug fix: searchsorted can return len(cumulative); the old guard used
    # '>' so sample == len(vocabs) slipped through and raised IndexError.
    if sample >= len(vocabs):
        sample = len(vocabs) - 1
    return vocabs[sample]
def gen_poem(begin_word):
    """Generate a passage of text with the trained LSTM model.

    Args:
        begin_word: optional first character; when falsy, the first
            character is sampled from the model's initial prediction.

    Returns:
        The generated text as a string (without the start/end sentinels).
    """
    batch_size = 1
    print('[INFO] loading corpus from %s' % FLAGS.file_path)
    poems_vector, word_int_map, vocabularies = process_poems(FLAGS.file_path)
    input_data = tf.placeholder(tf.int32, [batch_size, None])
    end_points = rnn_model(model='lstm', input_data=input_data, output_data=None, vocab_size=len(vocabularies), run_size=128, num_layers=2,batch_size=64, learning_rate=FLAGS.learning_rate)
    saver = tf.train.Saver(tf.global_variables())
    init_op = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())
    with tf.Session() as sess:
        sess.run(init_op)
        # Bug fix: the latest checkpoint was computed but then ignored in
        # favour of the hard-coded './zhetian_model/-48'.  Restore the
        # newest checkpoint; keep the old path only as a fallback.
        checkpoint = tf.train.latest_checkpoint('./zhetian_model/')
        saver.restore(sess, checkpoint if checkpoint else './zhetian_model/-48')
        # Feed the start sentinel to obtain the initial prediction/state.
        x = np.array([list(map(word_int_map.get, start_token))])
        [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']], feed_dict={input_data: x})
        if begin_word:
            word = begin_word
        else:
            word = to_word(predict, vocabularies)
        poem = ''
        # Sample one character at a time until the end sentinel appears.
        while word != end_token:
            print('running')
            poem += word
            x = np.zeros((1, 1))
            x[0, 0] = word_int_map[word]
            [predict, last_state] = sess.run([end_points['prediction'], end_points['last_state']],
                                             feed_dict={input_data: x, end_points['initial_state']: last_state})
            word = to_word(predict, vocabularies)
    return poem
def pretty_print_poem(poem):
    """Print the generated text sentence by sentence.

    Sentences are split on the Chinese full stop followed by a space;
    fragments of ten characters or fewer are suppressed.
    """
    sentences = poem.split('。 ')
    for sentence in sentences:
        if len(sentence) <= 10:
            continue  # drop empty/short fragments
        print(sentence)
def main(is_train):
    """Entry point: train the model, or interactively generate text.

    Args:
        is_train: truthy to run training; falsy to prompt for a starting
            character and print a generated passage.
    """
    print('zhetian.main:', is_train)
    if not is_train:
        print('[INFO] write zhetian fiction...')
        begin_word = input('输入起始字:')
        pretty_print_poem(gen_poem(begin_word))
        return
    print('[INFO] train zhetian fiction...')
    run_training()
if __name__ == '__main__':
tf.app.run() | [
"tensorflow.local_variables_initializer",
"numpy.random.rand",
"tensorflow.app.run",
"tensorflow.flags.DEFINE_string",
"os.path.exists",
"tensorflow.placeholder",
"tensorflow.flags.DEFINE_float",
"tensorflow.Session",
"os.mkdir",
"dataset.fiction.process_poems",
"time.localtime",
"tensorflow.g... | [((248, 304), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""batch_size"""', '(64)', '"""batch size."""'], {}), "('batch_size', 64, 'batch size.')\n", (271, 304), True, 'import tensorflow as tf\n'), ((305, 367), 'tensorflow.flags.DEFINE_float', 'tf.flags.DEFINE_float', (['"""learning_rate"""', '(0.01)', '"""learning rate."""'], {}), "('learning_rate', 0.01, 'learning rate.')\n", (326, 367), True, 'import tensorflow as tf\n'), ((626, 695), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""model_prefix"""', '"""poems"""', '"""model save prefix."""'], {}), "('model_prefix', 'poems', 'model save prefix.')\n", (648, 695), True, 'import tensorflow as tf\n'), ((696, 759), 'tensorflow.flags.DEFINE_integer', 'tf.flags.DEFINE_integer', (['"""epochs"""', '(50)', '"""train how many epochs."""'], {}), "('epochs', 50, 'train how many epochs.')\n", (719, 759), True, 'import tensorflow as tf\n'), ((760, 803), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""write"""', '""""""', '"""wtf."""'], {}), "('write', '', 'wtf.')\n", (782, 803), True, 'import tensorflow as tf\n'), ((804, 847), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""train"""', '""""""', '"""wtf."""'], {}), "('train', '', 'wtf.')\n", (826, 847), True, 'import tensorflow as tf\n'), ((848, 894), 'tensorflow.flags.DEFINE_string', 'tf.flags.DEFINE_string', (['"""no-train"""', '""""""', '"""wtf."""'], {}), "('no-train', '', 'wtf.')\n", (870, 894), True, 'import tensorflow as tf\n'), ((449, 490), 'os.path.abspath', 'os.path.abspath', (['"""./checkpoints/zhetian/"""'], {}), "('./checkpoints/zhetian/')\n", (464, 490), False, 'import os\n'), ((554, 599), 'os.path.abspath', 'os.path.abspath', (['"""./dataset/data/zhetian.txt"""'], {}), "('./dataset/data/zhetian.txt')\n", (569, 599), False, 'import os\n'), ((1391, 1421), 'dataset.fiction.process_poems', 'process_poems', (['FLAGS.file_path'], {}), '(FLAGS.file_path)\n', (1404, 1421), 
False, 'from dataset.fiction import process_poems, generate_batch\n'), ((1522, 1581), 'dataset.fiction.generate_batch', 'generate_batch', (['FLAGS.batch_size', 'poems_vector', 'word_to_int'], {}), '(FLAGS.batch_size, poems_vector, word_to_int)\n', (1536, 1581), False, 'from dataset.fiction import process_poems, generate_batch\n'), ((1638, 1688), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[FLAGS.batch_size, None]'], {}), '(tf.int32, [FLAGS.batch_size, None])\n', (1652, 1688), True, 'import tensorflow as tf\n'), ((1710, 1760), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[FLAGS.batch_size, None]'], {}), '(tf.int32, [FLAGS.batch_size, None])\n', (1724, 1760), True, 'import tensorflow as tf\n'), ((3682, 3700), 'numpy.cumsum', 'np.cumsum', (['predict'], {}), '(predict)\n', (3691, 3700), True, 'import numpy as np\n'), ((3709, 3724), 'numpy.sum', 'np.sum', (['predict'], {}), '(predict)\n', (3715, 3724), True, 'import numpy as np\n'), ((4028, 4058), 'dataset.fiction.process_poems', 'process_poems', (['FLAGS.file_path'], {}), '(FLAGS.file_path)\n', (4041, 4058), False, 'from dataset.fiction import process_poems, generate_batch\n'), ((4077, 4121), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32', '[batch_size, None]'], {}), '(tf.int32, [batch_size, None])\n', (4091, 4121), True, 'import tensorflow as tf\n'), ((5899, 5911), 'tensorflow.app.run', 'tf.app.run', ([], {}), '()\n', (5909, 5911), True, 'import tensorflow as tf\n'), ((1221, 1258), 'os.path.exists', 'os.path.exists', (['FLAGS.checkpoints_dir'], {}), '(FLAGS.checkpoints_dir)\n', (1235, 1258), False, 'import os\n'), ((1268, 1299), 'os.mkdir', 'os.mkdir', (['FLAGS.checkpoints_dir'], {}), '(FLAGS.checkpoints_dir)\n', (1276, 1299), False, 'import os\n'), ((2022, 2043), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (2041, 2043), True, 'import tensorflow as tf\n'), ((2068, 2101), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], 
{}), '()\n', (2099, 2101), True, 'import tensorflow as tf\n'), ((2103, 2135), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (2133, 2135), True, 'import tensorflow as tf\n'), ((2146, 2158), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (2156, 2158), True, 'import tensorflow as tf\n'), ((2240, 2289), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['FLAGS.checkpoints_dir'], {}), '(FLAGS.checkpoints_dir)\n', (2266, 2289), True, 'import tensorflow as tf\n'), ((4340, 4361), 'tensorflow.global_variables', 'tf.global_variables', ([], {}), '()\n', (4359, 4361), True, 'import tensorflow as tf\n'), ((4386, 4419), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4417, 4419), True, 'import tensorflow as tf\n'), ((4421, 4453), 'tensorflow.local_variables_initializer', 'tf.local_variables_initializer', ([], {}), '()\n', (4451, 4453), True, 'import tensorflow as tf\n'), ((4464, 4476), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4474, 4476), True, 'import tensorflow as tf\n'), ((4534, 4580), 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./zhetian_model/"""'], {}), "('./zhetian_model/')\n", (4560, 4580), True, 'import tensorflow as tf\n'), ((1112, 1150), 'os.path.dirname', 'os.path.dirname', (['FLAGS.checkpoints_dir'], {}), '(FLAGS.checkpoints_dir)\n', (1127, 1150), False, 'import os\n'), ((1170, 1208), 'os.path.dirname', 'os.path.dirname', (['FLAGS.checkpoints_dir'], {}), '(FLAGS.checkpoints_dir)\n', (1185, 1208), False, 'import os\n'), ((5061, 5077), 'numpy.zeros', 'np.zeros', (['(1, 1)'], {}), '((1, 1))\n', (5069, 5077), True, 'import numpy as np\n'), ((2570, 2586), 'time.localtime', 'time.localtime', ([], {}), '()\n', (2584, 2586), False, 'import time\n'), ((3761, 3778), 'numpy.random.rand', 'np.random.rand', (['(1)'], {}), '(1)\n', (3775, 3778), True, 'import numpy as np\n'), ((3466, 3521), 'os.path.join', 
'os.path.join', (['FLAGS.checkpoints_dir', 'FLAGS.model_prefix'], {}), '(FLAGS.checkpoints_dir, FLAGS.model_prefix)\n', (3478, 3521), False, 'import os\n')] |
# Zipline API
from zipline.api import attach_pipeline, pipeline_output, schedule_function, get_open_orders, order_target_percent
from zipline.pipeline import Pipeline
from zipline.utils.events import date_rules, time_rules
from zipline.pipeline.factors import AverageDollarVolume
from zipline import run_algorithm
# Data frame
import numpy as np
import pandas as pd
import statsmodels.api as sm
# Logging
from websocket import create_connection
# Data frame to JSON
from ..api.create_response import create_json_response
def trend_follow_run(start_date, end_date, capital_base, log_channel):
    """Run a trend-following backtest with zipline, streaming progress logs.

    Args:
        start_date: backtest start date string (parsed by pandas).
        end_date: backtest end date string (parsed by pandas).
        capital_base: starting capital for the simulation.
        log_channel: websocket channel suffix for streaming log messages.

    Returns:
        JSON response built from the zipline backtest result DataFrame.
    """
    # Websocket used to stream human-readable progress messages to the UI.
    ws = create_connection("ws://alpharithmic.herokuapp.com/ws/logs/%s/" % log_channel)
    msg_placeholder = "{\"message\": \"%s\"}"
    ws.send(msg_placeholder % "Link Start")

    def initialize(context):
        # Zipline entry point: set strategy parameters and schedule the
        # daily regression / trade / stop-loss / execution callbacks.
        ws.send(msg_placeholder % "Simulation Start")
        context.lookback = 252  # Period to calculate slope and drawdown
        context.max_leverage = 1.0  # Leverage
        context.profit_take = 1.96  # 95% of bollinger band
        context.minimum_return = 0.1  # Enter if and only if annualized slope exceeds this level
        context.max_drawdown = 0.10  # Avoid if too much drawdown
        context.market_impact = 0.2  # Max order is 10% of market trading volume
        context.weights = {}  # Slope at time of entry
        context.drawdown = {}  # Drawdown at time of entry
        context.shares = {}  # Daily target share
        schedule_function(func=stop_loss, date_rule=date_rules.every_day(),
                          time_rule=time_rules.market_open(minutes=30))
        ws.send(msg_placeholder % "Execution of stop loss scheduled at 30 minutes after market open")
        schedule_function(func=regression, date_rule=date_rules.every_day(),
                          time_rule=time_rules.market_open(minutes=50))
        ws.send(msg_placeholder % "Execution of regression computation scheduled at 50 minutes after market open")
        schedule_function(func=trade, date_rule=date_rules.every_day(),
                          time_rule=time_rules.market_open(minutes=100))
        ws.send(msg_placeholder % "Execution of transaction planner scheduled at 100 minutes after market open")
        for thirty_minute_interval in range(30, 391, 30):
            schedule_function(execute_transactions, date_rules.every_day(),
                              time_rules.market_open(minutes=thirty_minute_interval))  # execute every 30 minutes
        ws.send(msg_placeholder % "Execution of transactions scheduled at every 30 minutes")
        attach_pipeline(create_high_dollar_volume_pipeline(), 'top_dollar_volume')
        ws.send(msg_placeholder % "High Dollar Volume pipeline filter attached")

    def create_high_dollar_volume_pipeline():
        # Universe filter: only the top 5% of assets by average dollar volume.
        pipe = Pipeline()
        dollar_volume = AverageDollarVolume(window_length=63)  # 63 days = 1 quarter
        pipe.add(dollar_volume, 'dollar_volume')
        high_dollar_volume = dollar_volume.percentile_between(95, 100)  # top 5% by dollar volume
        pipe.set_screen(high_dollar_volume)
        return pipe

    def before_trading_start(context, data):
        # Refresh the tradable universe from the pipeline each session.
        context.pipe_output = pipeline_output('top_dollar_volume')
        context.security_list = context.pipe_output.index

    def regression(context, data):
        # Fit an OLS line through each security's recent open prices and use
        # its annualized slope to decide entries/exits (recorded in
        # context.weights).
        prices = data.history(context.security_list, 'open', context.lookback, '1d')
        X = range(len(prices))
        # Add constant to ensure intercept
        A = sm.add_constant(X)
        for s in context.security_list:
            # Price movement standard deviation
            sd = prices[s].std()
            # Price points to run regression
            Y = prices[s].values
            if np.isnan(Y).any():
                continue
            # y = ax + b
            results = sm.OLS(Y, A).fit()
            (b, a) = results.params
            slope = a / Y[-1] * 252  # Daily return regression * 1 year
            # NOTE(review): `dd` is module-global, so its value persists across
            # securities and calls; when slope == 0 the previous value is
            # reused. Confirm this carry-over is intentional.
            global dd
            if slope > 0:
                dd = drawdown(Y)
            elif slope < 0:
                dd = drawdown(-Y)
            # How far are we from regression line?
            delta = Y - (np.dot(a, X) + b)
            slope_min = max(dd, context.minimum_return)
            gain = get_gain(context, s)
            # Exit
            if s in context.weights and context.weights[s] != 0:
                # Long but slope turns down
                if context.weights[s] > 0 and slope < 0:
                    context.weights[s] = 0
                    # NOTE(review): message says "bull" for a downward turn —
                    # likely meant "bear"; left unchanged (runtime string).
                    ws.send(msg_placeholder % ('Gained %+2d%% for %s, exited from long because slope turns bull'
                                               % (gain*100, str(s))))
                # Short but slope turns up
                elif context.weights[s] < 0 and slope > 0:
                    context.weights[s] = 0
                    ws.send(msg_placeholder % ('Gained %+2d%% for %s, exited from short because slope turns bear'
                                               % (gain*100, str(s))))
                # Profit take reaches top 95% bollinger band
                elif delta[-1] > context.profit_take * sd and s in context.weights and context.weights[s] > 0:
                    context.weights[s] = 0
                    ws.send(msg_placeholder %
                            ('Gained %+2d%% for %s, exited from long because profit take at top 95%% of bollinger band'
                             % (gain * 100, str(s))))
                elif delta[-1] < -context.profit_take * sd and context.weights[s] < 0:
                    context.weights[s] = 0
                    ws.send(msg_placeholder %
                            ('Gained %+2d%% for %s, exited from long because profit take at top 95%% of bollinger band'
                             % (gain * 100, str(s))))
            # Enter
            else:
                # Trend is up and price crosses the regression line
                if slope > slope_min and delta[-1] > 0 and delta[-2] < 0 and dd < context.max_drawdown:
                    context.weights[s] = slope
                    context.drawdown[s] = slope_min
                    ws.send(msg_placeholder %
                            ('Bought %s because trend is up and price crosses regression line' % (str(s))))
                # Trend is down and price crosses the regression line
                if slope < -slope_min and delta[-1] < 0 and delta[-2] > 0 and dd < context.max_drawdown:
                    context.weights[s] = slope
                    context.drawdown[s] = slope_min
                    ws.send(msg_placeholder %
                            ('Shorted %s because trend is down and price crosses regression line' % (str(s))))

    def execute_transactions(context, data):
        # Place the per-security target-percent orders planned by trade(),
        # skipping anything untradable or with an open order.
        open_orders = get_open_orders()
        for s in context.shares:
            if not data.can_trade(s) or s in open_orders:
                continue
            pct_shares = context.shares[s]
            order_target_percent(s, pct_shares)

    def trade(context, data):
        # Translate context.weights into equal-sized target percentages in
        # context.shares; drop entries whose weight is zero or missing.
        weights = context.weights
        positions = sum(weights[weight] != 0 for weight in weights)
        held_positions = [p for p in context.portfolio.positions if context.portfolio.positions[p].amount != 0]
        context.securities = context.security_list.tolist() + held_positions
        for security in context.securities:
            if security not in weights:
                context.shares.pop(security, 0)
                context.drawdown.pop(security, 0)
            elif weights[security] == 0:
                context.shares.pop(security, 0)
                context.drawdown.pop(security, 0)
            elif weights[security] > 0:
                context.shares[security] = context.max_leverage / positions
            elif weights[security] < 0:
                context.shares[security] = -(context.max_leverage / positions)

    def stop_loss(context, data):
        # Flatten any held position whose drawdown since entry exceeds the
        # threshold recorded in context.drawdown at entry time.
        prices = data.history(list(context.portfolio.positions), 'price', context.lookback, '1d')
        for s in context.portfolio.positions:
            if s not in context.weights or context.weights[s] == 0:
                context.shares[s] = 0
                continue
            if s not in prices or s in get_open_orders():
                continue
            gain = get_gain(context, s)
            if context.portfolio.positions[s].amount > 0 and drawdown(prices[s].values) > context.drawdown[s]:
                context.weights[s] = 0
                context.shares[s] = 0  # stop loss
                ws.send(msg_placeholder %
                        ('Exited from long because of stop loss with change of %+2d%% for %s,'
                         % (gain * 100, str(s))))
            elif context.portfolio.positions[s].amount < 0 and drawdown(- prices[s].values) > context.drawdown[s]:
                context.weights[s] = 0
                context.shares[s] = 0
                ws.send(msg_placeholder %
                        ('Exited from short because of stop loss with change of %+2d%% for %s,'
                         % (gain * 100, str(s))))

    def drawdown(xs):
        # Maximum peak-to-trough drop of the series, as a fraction of the
        # trough value; 0 for empty input or when the peak is at index 0.
        if len(xs) == 0:
            return 0
        period_end = np.argmax(np.maximum.accumulate(xs) - xs)
        if len(xs[:period_end]) == 0:
            return 0
        period_start = np.argmax(xs[:period_end])
        return abs((xs[period_start] - xs[period_end]) / xs[period_end])

    def get_gain(context, s):
        # Fractional gain of the current position in s (sign-adjusted for
        # shorts); 0 when there is no position or cost basis is zero.
        gain = 0
        if s in context.portfolio.positions:
            cost = context.portfolio.positions[s].cost_basis
            amount = context.portfolio.positions[s].amount
            price = context.portfolio.positions[s].last_sale_price
            if cost == 0:
                return 0
            if amount > 0:
                gain = price / cost - 1
            elif amount < 0:
                gain = 1 - price / cost
        return gain

    start = pd.to_datetime(start_date).tz_localize('US/Eastern')
    end = pd.to_datetime(end_date).tz_localize('US/Eastern')

    result = run_algorithm(start, end,
                           initialize=initialize, before_trading_start=before_trading_start,
                           capital_base=capital_base,
                           bundle="quandl")
    ws.send(msg_placeholder % "Simulation End")
    ws.send(msg_placeholder % "Fetching backtest results from Redis Queue...")
    # Drop rows with missing values before serializing the result frame.
    result.dropna(inplace=True)
    ws.close()
    return create_json_response(result)
| [
"zipline.pipeline.Pipeline",
"zipline.api.get_open_orders",
"numpy.argmax",
"zipline.utils.events.date_rules.every_day",
"zipline.utils.events.time_rules.market_open",
"statsmodels.api.add_constant",
"zipline.api.pipeline_output",
"zipline.api.order_target_percent",
"zipline.pipeline.factors.Average... | [((607, 685), 'websocket.create_connection', 'create_connection', (["('ws://alpharithmic.herokuapp.com/ws/logs/%s/' % log_channel)"], {}), "('ws://alpharithmic.herokuapp.com/ws/logs/%s/' % log_channel)\n", (624, 685), False, 'from websocket import create_connection\n'), ((10095, 10235), 'zipline.run_algorithm', 'run_algorithm', (['start', 'end'], {'initialize': 'initialize', 'before_trading_start': 'before_trading_start', 'capital_base': 'capital_base', 'bundle': '"""quandl"""'}), "(start, end, initialize=initialize, before_trading_start=\n before_trading_start, capital_base=capital_base, bundle='quandl')\n", (10108, 10235), False, 'from zipline import run_algorithm\n'), ((2837, 2847), 'zipline.pipeline.Pipeline', 'Pipeline', ([], {}), '()\n', (2845, 2847), False, 'from zipline.pipeline import Pipeline\n'), ((2873, 2910), 'zipline.pipeline.factors.AverageDollarVolume', 'AverageDollarVolume', ([], {'window_length': '(63)'}), '(window_length=63)\n', (2892, 2910), False, 'from zipline.pipeline.factors import AverageDollarVolume\n'), ((3223, 3259), 'zipline.api.pipeline_output', 'pipeline_output', (['"""top_dollar_volume"""'], {}), "('top_dollar_volume')\n", (3238, 3259), False, 'from zipline.api import attach_pipeline, pipeline_output, schedule_function, get_open_orders, order_target_percent\n'), ((3528, 3546), 'statsmodels.api.add_constant', 'sm.add_constant', (['X'], {}), '(X)\n', (3543, 3546), True, 'import statsmodels.api as sm\n'), ((6821, 6838), 'zipline.api.get_open_orders', 'get_open_orders', ([], {}), '()\n', (6836, 6838), False, 'from zipline.api import attach_pipeline, pipeline_output, schedule_function, get_open_orders, order_target_percent\n'), ((9361, 9387), 'numpy.argmax', 'np.argmax', (['xs[:period_end]'], {}), '(xs[:period_end])\n', (9370, 9387), True, 'import numpy as np\n'), ((7013, 7048), 'zipline.api.order_target_percent', 'order_target_percent', (['s', 'pct_shares'], {}), '(s, pct_shares)\n', (7033, 7048), 
False, 'from zipline.api import attach_pipeline, pipeline_output, schedule_function, get_open_orders, order_target_percent\n'), ((9967, 9993), 'pandas.to_datetime', 'pd.to_datetime', (['start_date'], {}), '(start_date)\n', (9981, 9993), True, 'import pandas as pd\n'), ((10030, 10054), 'pandas.to_datetime', 'pd.to_datetime', (['end_date'], {}), '(end_date)\n', (10044, 10054), True, 'import pandas as pd\n'), ((1541, 1563), 'zipline.utils.events.date_rules.every_day', 'date_rules.every_day', ([], {}), '()\n', (1561, 1563), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((1601, 1635), 'zipline.utils.events.time_rules.market_open', 'time_rules.market_open', ([], {'minutes': '(30)'}), '(minutes=30)\n', (1623, 1635), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((1794, 1816), 'zipline.utils.events.date_rules.every_day', 'date_rules.every_day', ([], {}), '()\n', (1814, 1816), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((1854, 1888), 'zipline.utils.events.time_rules.market_open', 'time_rules.market_open', ([], {'minutes': '(50)'}), '(minutes=50)\n', (1876, 1888), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((2055, 2077), 'zipline.utils.events.date_rules.every_day', 'date_rules.every_day', ([], {}), '()\n', (2075, 2077), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((2115, 2150), 'zipline.utils.events.time_rules.market_open', 'time_rules.market_open', ([], {'minutes': '(100)'}), '(minutes=100)\n', (2137, 2150), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((2377, 2399), 'zipline.utils.events.date_rules.every_day', 'date_rules.every_day', ([], {}), '()\n', (2397, 2399), False, 'from zipline.utils.events import date_rules, time_rules\n'), ((2431, 2485), 'zipline.utils.events.time_rules.market_open', 'time_rules.market_open', ([], {'minutes': 'thirty_minute_interval'}), '(minutes=thirty_minute_interval)\n', (2453, 2485), False, 'from 
zipline.utils.events import date_rules, time_rules\n'), ((9245, 9270), 'numpy.maximum.accumulate', 'np.maximum.accumulate', (['xs'], {}), '(xs)\n', (9266, 9270), True, 'import numpy as np\n'), ((3765, 3776), 'numpy.isnan', 'np.isnan', (['Y'], {}), '(Y)\n', (3773, 3776), True, 'import numpy as np\n'), ((3857, 3869), 'statsmodels.api.OLS', 'sm.OLS', (['Y', 'A'], {}), '(Y, A)\n', (3863, 3869), True, 'import statsmodels.api as sm\n'), ((4207, 4219), 'numpy.dot', 'np.dot', (['a', 'X'], {}), '(a, X)\n', (4213, 4219), True, 'import numpy as np\n'), ((8287, 8304), 'zipline.api.get_open_orders', 'get_open_orders', ([], {}), '()\n', (8302, 8304), False, 'from zipline.api import attach_pipeline, pipeline_output, schedule_function, get_open_orders, order_target_percent\n')] |
import configparser
import re
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import tweepy
from textblob import TextBlob, Word
from textblob.sentiments import NaiveBayesAnalyzer
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from wordcloud import STOPWORDS, WordCloud
# Module-level singletons shared by the class and the __main__ block:
# VADER sentiment scorer and the INI configuration parser.
analyser = SentimentIntensityAnalyzer()
parser = configparser.ConfigParser()
class TwitterSentimentsAnalysis(object):
    """Search Twitter, label tweet sentiment with VADER, and render word clouds."""

    def __init__(self, configdict=None):
        """Store the OAuth config; the tweepy client is built lazily.

        Args:
            configdict: mapping with consumer_key/consumer_secret/
                oauth_token/oauth_secret entries.
        """
        # BUG FIX: a mutable default argument ({}) would be shared across
        # every instance created without an explicit config.
        self._configdict = configdict if configdict is not None else {}
        self._twitter = None

    @property
    def twitter(self):
        """Authenticated tweepy API client, created on first access."""
        if not self._twitter:
            auth = tweepy.OAuthHandler(
                self._configdict["consumer_key"], self._configdict["consumer_secret"]
            )
            auth.set_access_token(
                self._configdict["oauth_token"], self._configdict["oauth_secret"]
            )
            # creating the API object
            self._twitter = tweepy.API(auth)
        return self._twitter

    @staticmethod
    def _remove_pattern(input_txt, pattern):
        """Delete every regex match of `pattern` from `input_txt` and collapse
        the remaining whitespace to single spaces."""
        r = re.findall(pattern, input_txt)
        for i in r:
            input_txt = re.sub(i, "", input_txt)
        return " ".join(input_txt.split())

    def _clean_tweets(self, lst):
        """Strip retweet markers, @handles and URLs from an array of tweet texts."""
        # remove twitter Return handles (RT @xxx:)
        lst = np.vectorize(self._remove_pattern)(lst, "RT @[\w]*:")
        # remove twitter handles (@xxx)
        lst = np.vectorize(self._remove_pattern)(lst, "@[\w]*")
        # remove URL links (httpxxx)
        lst = np.vectorize(self._remove_pattern)(lst, "https?://[A-Za-z0-9./]*")
        # NOTE(review): np.core.defchararray.replace performs *literal*
        # substring replacement, not regex, so this pattern string never
        # matches anything. Kept as-is to preserve behavior; verify intent.
        # remove special characters, numbers, punctuations (except for #)
        lst = np.core.defchararray.replace(lst, "[^a-zA-Z#]", " ")
        return lst

    def search_tweets(self, search, count=2000):
        """Collect up to `count` English tweets matching `search`.

        Returns:
            A de-duplicated DataFrame of tweet metadata, or None when the
            search produced no results (implicit, preserved behavior).
        """
        results = []
        for tweet in tweepy.Cursor(self.twitter.search, q=search, lang="en").items(count):
            results.append(tweet)
        if results:
            list_of_ids = [result.id for result in results]
            data_set = pd.DataFrame(list_of_ids, columns=["id"])
            data_set["created_at"] = [result.created_at for result in results]
            try:
                data_set["hashtag"] = list(
                    set(
                        hashtags.get("text", None)
                        for result in results
                        if result.entities.get("hashtags")
                        for hashtags in result.entities.get("hashtags", None)
                    )
                )
            except Exception:
                # Fall back to the raw hashtag entities when the flattened
                # set does not line up with the frame length.
                data_set["hashtag"] = [
                    result.entities.get("hashtags") for result in results
                ]
            data_set["retweet_count"] = [result.retweet_count for result in results]
            data_set["text"] = [result.text for result in results]
            data_set["user_followers"] = [
                result.user.followers_count for result in results
            ]
            data_set["user_name"] = [result.author.screen_name for result in results]
            data_set["user_location"] = [result.user.location for result in results]
            # Clean and Remove duplicates
            cleaned_texts = self._clean_tweets(data_set["text"])
            cleaned_texts = list(cleaned_texts)
            for index, text in enumerate(cleaned_texts):
                data_set.at[index, "text_duplicates"] = text
            data_set.drop_duplicates("text_duplicates", inplace=True)
            data_set.reset_index(drop=True, inplace=True)
            data_set.drop("text", axis=1, inplace=True)
            data_set.rename(columns={"text_duplicates": "text"}, inplace=True)
            return data_set

    @staticmethod
    def _sentiment_analyzer_scores(text):
        """Map VADER's compound score to a Positive/Neutral/Negative label."""
        score = analyser.polarity_scores(text)
        lb = score["compound"]
        if lb >= 0.05:
            return "Positive"
        elif (lb > -0.05) and (lb < 0.05):
            return "Neutral"
        else:
            return "Negative"

    def generate_sentiments(self, data_set):
        """Append a 'SentimentalPolarityVader' label column and return the frame."""
        assert isinstance(data_set, pd.core.frame.DataFrame)
        texts = data_set.get("text")
        # BUG FIX: `index` was previously undefined (NameError); enumerate
        # pairs each text with its row label (0..n after reset_index in
        # search_tweets).
        for index, text in enumerate(texts):
            sentiment = self._sentiment_analyzer_scores(text)
            data_set.at[index, "SentimentalPolarityVader"] = sentiment
        return data_set

    def generate_wordcloud(self, words, image_title="Sentiment"):
        """Render a word cloud for `words` and save it as '<image_title>.png'."""
        wordcloud = WordCloud(
            background_color="black",
            stopwords=STOPWORDS,
            width=1600,
            height=800,
            random_state=1,
            colormap="jet",
            max_words=50,
            max_font_size=200,
        ).generate(words)
        # BUG FIX: plt.title() previously ran before plt.figure(), so the
        # title applied to a different (implicit) figure than the one saved.
        plt.figure()
        plt.title(image_title, fontsize=20, color="Red")
        plt.axis("off")
        plt.imshow(wordcloud, interpolation="bilinear")
        plt.savefig(f"{image_title}.png", bbox_inches="tight", dpi=300)

    @staticmethod
    def save_to_csv(data_set, filename="data_frame.csv"):
        """Write `data_set` to a tab-separated UTF-8 file at `filename`."""
        assert isinstance(data_set, pd.core.frame.DataFrame)
        # BUG FIX: previously wrote to undefined `file_name` (NameError);
        # the parameter is named `filename`.
        data_set.to_csv(filename, sep="\t", encoding="utf-8")
if __name__ == "__main__":
    config_file = Path("config.ini")
    configdict = {}
    # BUG FIX: Path objects are always truthy, so `if config_file:` always
    # ran; test for the file's actual existence instead. `configdict` is
    # pre-initialized so the lookup below fails cleanly when config is absent.
    if config_file.exists():
        parser.read(config_file)
        configdict = {
            section: dict(parser.items(section)) for section in parser.sections()
        }
    twitterAPI = TwitterSentimentsAnalysis(configdict=configdict["Twitter Keys"])
    results = twitterAPI.search_tweets("food")
    results = twitterAPI.generate_sentiments(results)
    words = " ".join(results["text"])
    # BUG FIX: generate_wordcloud returns None; previously its return value
    # clobbered `results`, making save_to_csv(None) fail its assertion.
    twitterAPI.generate_wordcloud(words)
    twitterAPI.save_to_csv(results)
| [
"matplotlib.pyplot.imshow",
"vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer",
"matplotlib.pyplot.savefig",
"configparser.ConfigParser",
"pandas.DataFrame",
"pathlib.Path",
"tweepy.Cursor",
"matplotlib.pyplot.axis",
"wordcloud.WordCloud",
"numpy.core.defchararray.replace",
"matplotlib.p... | [((353, 381), 'vaderSentiment.vaderSentiment.SentimentIntensityAnalyzer', 'SentimentIntensityAnalyzer', ([], {}), '()\n', (379, 381), False, 'from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\n'), ((391, 418), 'configparser.ConfigParser', 'configparser.ConfigParser', ([], {}), '()\n', (416, 418), False, 'import configparser\n'), ((5199, 5217), 'pathlib.Path', 'Path', (['"""config.ini"""'], {}), "('config.ini')\n", (5203, 5217), False, 'from pathlib import Path\n'), ((1095, 1125), 're.findall', 're.findall', (['pattern', 'input_txt'], {}), '(pattern, input_txt)\n', (1105, 1125), False, 'import re\n'), ((1702, 1754), 'numpy.core.defchararray.replace', 'np.core.defchararray.replace', (['lst', '"""[^a-zA-Z#]"""', '""" """'], {}), "(lst, '[^a-zA-Z#]', ' ')\n", (1730, 1754), True, 'import numpy as np\n'), ((4729, 4777), 'matplotlib.pyplot.title', 'plt.title', (['image_title'], {'fontsize': '(20)', 'color': '"""Red"""'}), "(image_title, fontsize=20, color='Red')\n", (4738, 4777), True, 'import matplotlib.pyplot as plt\n'), ((4786, 4798), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (4796, 4798), True, 'import matplotlib.pyplot as plt\n'), ((4807, 4822), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (4815, 4822), True, 'import matplotlib.pyplot as plt\n'), ((4831, 4878), 'matplotlib.pyplot.imshow', 'plt.imshow', (['wordcloud'], {'interpolation': '"""bilinear"""'}), "(wordcloud, interpolation='bilinear')\n", (4841, 4878), True, 'import matplotlib.pyplot as plt\n'), ((4887, 4950), 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""{image_title}.png"""'], {'bbox_inches': '"""tight"""', 'dpi': '(300)'}), "(f'{image_title}.png', bbox_inches='tight', dpi=300)\n", (4898, 4950), True, 'import matplotlib.pyplot as plt\n'), ((655, 750), 'tweepy.OAuthHandler', 'tweepy.OAuthHandler', (["self._configdict['consumer_key']", "self._configdict['consumer_secret']"], {}), "(self._configdict['consumer_key'], 
self._configdict[\n 'consumer_secret'])\n", (674, 750), False, 'import tweepy\n'), ((973, 989), 'tweepy.API', 'tweepy.API', (['auth'], {}), '(auth)\n', (983, 989), False, 'import tweepy\n'), ((1170, 1194), 're.sub', 're.sub', (['i', '""""""', 'input_txt'], {}), "(i, '', input_txt)\n", (1176, 1194), False, 'import re\n'), ((1338, 1372), 'numpy.vectorize', 'np.vectorize', (['self._remove_pattern'], {}), '(self._remove_pattern)\n', (1350, 1372), True, 'import numpy as np\n'), ((1446, 1480), 'numpy.vectorize', 'np.vectorize', (['self._remove_pattern'], {}), '(self._remove_pattern)\n', (1458, 1480), True, 'import numpy as np\n'), ((1547, 1581), 'numpy.vectorize', 'np.vectorize', (['self._remove_pattern'], {}), '(self._remove_pattern)\n', (1559, 1581), True, 'import numpy as np\n'), ((2073, 2114), 'pandas.DataFrame', 'pd.DataFrame', (['list_of_ids'], {'columns': "['id']"}), "(list_of_ids, columns=['id'])\n", (2085, 2114), True, 'import pandas as pd\n'), ((1866, 1921), 'tweepy.Cursor', 'tweepy.Cursor', (['self.twitter.search'], {'q': 'search', 'lang': '"""en"""'}), "(self.twitter.search, q=search, lang='en')\n", (1879, 1921), False, 'import tweepy\n'), ((4451, 4601), 'wordcloud.WordCloud', 'WordCloud', ([], {'background_color': '"""black"""', 'stopwords': 'STOPWORDS', 'width': '(1600)', 'height': '(800)', 'random_state': '(1)', 'colormap': '"""jet"""', 'max_words': '(50)', 'max_font_size': '(200)'}), "(background_color='black', stopwords=STOPWORDS, width=1600, height\n =800, random_state=1, colormap='jet', max_words=50, max_font_size=200)\n", (4460, 4601), False, 'from wordcloud import STOPWORDS, WordCloud\n')] |
import numpy as np
import tensorflow as tf
from ammf.utils.wavedata.tools.core import geometry_utils
from ammf.core import box_3d_encoder
from ammf.core import format_checker
"""Box4c Encoder
Converts boxes between the box_3d and box_4c formats.
- box_4c format: [x1, x2, x3, x4, z1, z2, z3, z4, h1, h2]
- corners are in the xz plane, numbered clockwise starting at the top right
- h1 is the height above the ground plane to the bottom of the box
- h2 is the height above the ground plane to the top of the box
"""
def np_box_3d_to_box_4c(box_3d, ground_plane):
    """Convert a single box_3d into the box_4c format.

    The box is ortho-rotated to the nearest 90 degrees via the anchor
    representation, its four xz corners are rotated by the residual angle
    and translated to the centroid, and the two heights are measured from
    the ground plane.

    Args:
        box_3d: box_3d array (last entry is the rotation ry)
        ground_plane: ground plane coefficients (4,)

    Returns:
        box_4c (10,): [x1..x4, z1..z4, h1, h2]
    """
    format_checker.check_box_3d_format(box_3d)

    anchor = box_3d_encoder.box_3d_to_anchor(box_3d, ortho_rotate=True)[0]
    cen_x, cen_y, cen_z = anchor[0], anchor[1], anchor[2]
    len_x, len_y, len_z = anchor[3], anchor[4], anchor[5]

    # Corners of an origin-centred box in the xz plane, clockwise starting
    # at the top right.
    half_x = len_x / 2
    half_z = len_z / 2
    x_corners = np.asarray([half_x, half_x, -half_x, -half_x])
    z_corners = np.asarray([half_z, -half_z, -half_z, half_z])

    # Residual rotation relative to the nearest multiple of 90 degrees.
    ry = box_3d[6]
    half_pi = np.pi / 2
    ry_diff = ry - np.round(ry / half_pi) * half_pi

    # Rotate and translate the corners with one affine (homogeneous) transform.
    cos_ry = np.cos(ry_diff)
    sin_ry = np.sin(ry_diff)
    tr_mat = np.array([[cos_ry, sin_ry, cen_x],
                       [-sin_ry, cos_ry, cen_z],
                       [0, 0, 1]])
    corners_homogeneous = np.vstack([x_corners, z_corners, np.ones(4)])
    # Keep only the x and z rows; drop the homogeneous ones row.
    corners = np.matmul(tr_mat, corners_homogeneous)[0:2]

    # Heights above the ground plane, measured at the box centroid.
    ground_y = geometry_utils.calculate_plane_point(
        ground_plane, [cen_x, None, cen_z])[1]
    h1 = ground_y - cen_y
    h2 = h1 + len_y

    return np.hstack([corners.flatten(), h1, h2])
def tf_box_3d_to_box_4c(boxes_3d, ground_plane):
    """Vectorized conversion of box_3d to box_4c tensors

    Args:
        boxes_3d: Tensor of boxes_3d (N, 7)
        ground_plane: Tensor ground plane coefficients (4,)

    Returns:
        Tensor of boxes_4c (N, 10)
    """
    format_checker.check_box_3d_format(boxes_3d)

    anchors = box_3d_encoder.tf_box_3d_to_anchor(boxes_3d)

    centroid_x = anchors[:, 0]
    centroid_y = anchors[:, 1]
    centroid_z = anchors[:, 2]
    dim_x = anchors[:, 3]
    dim_y = anchors[:, 4]
    dim_z = anchors[:, 5]

    # Create temporary box at (0, 0) for rotation
    half_dim_x = dim_x / 2
    half_dim_z = dim_z / 2

    # Box corners (per box, clockwise in the xz plane)
    x_corners = tf.stack([half_dim_x, half_dim_x,
                          -half_dim_x, -half_dim_x], axis=1)

    z_corners = tf.stack([half_dim_z, -half_dim_z,
                          -half_dim_z, half_dim_z], axis=1)

    # Rotations from boxes_3d
    all_rys = boxes_3d[:, 6]

    # Find nearest 90 degree
    half_pi = np.pi / 2
    ortho_rys = tf.round(all_rys / half_pi) * half_pi

    # Get rys and 0/1 padding
    # (residual rotation after snapping to the nearest 90 degrees)
    ry_diffs = all_rys - ortho_rys
    zeros = tf.zeros_like(ry_diffs, dtype=tf.float32)
    ones = tf.ones_like(ry_diffs, dtype=tf.float32)

    # Create transformation matrix, including rotation and translation
    # (one 3x3 homogeneous transform per box, stacked along axis 2)
    tr_mat = tf.stack(
        [tf.stack([tf.cos(ry_diffs), tf.sin(ry_diffs), centroid_x], axis=1),
         tf.stack([-tf.sin(ry_diffs), tf.cos(ry_diffs), centroid_z], axis=1),
         tf.stack([zeros, zeros, ones], axis=1)],
        axis=2)

    # Create a ones row
    ones_row = tf.ones_like(x_corners)

    # Append the column of ones to be able to multiply
    points_stacked = tf.stack([x_corners, z_corners, ones_row], axis=1)
    corners = tf.matmul(tr_mat, points_stacked,
                        transpose_a=True,
                        transpose_b=False)

    # Discard the last row (ones)
    corners = corners[:, 0:2]
    flat_corners = tf.reshape(corners, [-1, 8])

    # Get ground plane coefficients
    a = ground_plane[0]
    b = ground_plane[1]
    c = ground_plane[2]
    d = ground_plane[3]

    # Calculate heights off ground plane
    # (solve ax + by + cz + d = 0 for y at each centroid)
    ground_y = -(a * centroid_x + c * centroid_z + d) / b
    h1 = ground_y - centroid_y
    h2 = h1 + dim_y

    batched_h1 = tf.reshape(h1, [-1, 1])
    batched_h2 = tf.reshape(h2, [-1, 1])

    # Stack into (?, 10)
    box_4c = tf.concat([flat_corners, batched_h1, batched_h2], axis=1)
    return box_4c
def _np_calculate_box_3d_info(vec_dir, vec_dir_mag,
                              p1, p2, p3, p4, midpoint):
    """Numpy twin of `calculate_box_3d_info` for a single box.

    Projects the four box_4c corners onto the direction vector and its
    normal to recover the box_3d centroid xz, length, width and ry.

    Args:
        vec_dir: longest midpoint-to-midpoint vector (2,)
        vec_dir_mag: magnitude of vec_dir
        p1, p2, p3, p4: the four xz corners (2,) each
        midpoint: starting midpoint (2,)

    Returns:
        (centroid, length_out, width_out, ry_out)
    """
    vec_dir_norm = vec_dir / vec_dir_mag
    # Normal to the direction vector (rotated 90 degrees).
    ortho_norm = np.asarray([-vec_dir_norm[1], vec_dir_norm[0]])

    # Projections of each corner (relative to the midpoint) onto the
    # direction vector give the box extent along its length.
    all_lengths = [np.dot(p - midpoint, vec_dir_norm) for p in (p1, p2, p3, p4)]
    min_l = np.amin(all_lengths)
    max_l = np.amax(all_lengths)
    length_out = max_l - min_l

    # Projections onto the normal give the extent along the width.
    all_widths = [np.dot(p - midpoint, ortho_norm) for p in (p1, p2, p3, p4)]
    min_w = np.amin(all_widths)
    max_w = np.amax(all_widths)
    width_out = max_w - min_w
    # NOTE(review): the centroid offset along the normal uses max_w + min_w
    # (not its half); this matches the TF version `calculate_box_3d_info`
    # and is preserved as-is.
    w_diff = max_w + min_w

    ry_out = -np.arctan2(vec_dir[1], vec_dir[0])

    # New centroid: half the projected length along the direction vector,
    # plus the width difference along the normal.
    centroid = midpoint + vec_dir_norm * (min_l + max_l) / 2.0 + \
        ortho_norm * w_diff
    return centroid, length_out, width_out, ry_out


def np_box_4c_to_box_3d(box_4c, ground_plane):
    """Converts a single box_4c to box_3d. The longest midpoint-midpoint
    length is used to calculate orientation. Points are projected onto the
    orientation vector and the orthogonal vector to get the bounding box_3d.
    The centroid is calculated by adding a vector of half the projected length
    along the midpoint-midpoint vector, and a vector of the width
    differences along the normal.

    Args:
        box_4c: box_4c to convert (10,)
        ground_plane: ground plane coefficients (4,)

    Returns:
        box_3d (7,)
    """
    format_checker.check_box_4c_format(box_4c)

    # Extract corners
    corners = box_4c[0:8].reshape(2, 4)
    p1 = corners[:, 0]
    p2 = corners[:, 1]
    p3 = corners[:, 2]
    p4 = corners[:, 3]

    # Midpoints of the four edges
    midpoint_12 = (p1 + p2) / 2.0
    midpoint_23 = (p2 + p3) / 2.0
    midpoint_34 = (p3 + p4) / 2.0
    midpoint_14 = (p1 + p4) / 2.0

    vec_34_12 = midpoint_12 - midpoint_34
    vec_34_12_mag = np.linalg.norm(vec_34_12)

    vec_23_14 = midpoint_14 - midpoint_23
    vec_23_14_mag = np.linalg.norm(vec_23_14)

    # Orientation follows the longer midpoint -> midpoint vector.
    # (Previously both branches duplicated ~40 lines of identical math;
    # the shared computation now lives in _np_calculate_box_3d_info.)
    if vec_34_12_mag > vec_23_14_mag:
        centroid, length_out, width_out, ry_out = _np_calculate_box_3d_info(
            vec_34_12, vec_34_12_mag, p1, p2, p3, p4, midpoint=midpoint_34)
    else:
        centroid, length_out, width_out, ry_out = _np_calculate_box_3d_info(
            vec_23_14, vec_23_14_mag, p1, p2, p3, p4, midpoint=midpoint_23)

    # Find new centroid y
    a = ground_plane[0]
    b = ground_plane[1]
    c = ground_plane[2]
    d = ground_plane[3]

    h1 = box_4c[8]
    h2 = box_4c[9]

    centroid_x = centroid[0]
    centroid_z = centroid[1]

    # Solve the plane equation for y at the centroid.
    ground_y = -(a * centroid_x + c * centroid_z + d) / b

    # h1 and h2 are along the -y axis
    centroid_y = ground_y - h1
    height_out = h2 - h1

    box_3d_out = np.stack([centroid_x, centroid_y, centroid_z,
                           length_out, width_out, height_out, ry_out])

    return box_3d_out
def calculate_box_3d_info(vec_dir, vec_dir_mag,
                          p1, p2, p3, p4, midpoint):
    """Calculates the box_3d centroid xz, l, w, and ry from the 4 points of
    a box_4c. To calculate length and width, points are projected onto the
    direction vector, and its normal. The centroid is calculated by adding
    vectors of half the length, and the width difference along the normal to
    the starting midpoint. ry is calculated with atan2 of the direction vector.

    Args:
        vec_dir: vector of longest box_4c midpoint to midpoint
        vec_dir_mag: magnitude of the direction vector
        p1: point 1
        p2: point 2
        p3: point 3
        p4: point 4
        midpoint: starting midpoint

    Returns:
        box_3d info (centroid, length_out, width_out, ry_out)
    """
    vec_dir_norm = vec_dir / tf.reshape(vec_dir_mag, [-1, 1])

    # Corner vectors relative to the starting midpoint
    vec_mid_p1 = p1 - midpoint
    vec_mid_p2 = p2 - midpoint
    vec_mid_p3 = p3 - midpoint
    vec_mid_p4 = p4 - midpoint

    # Project each corner onto the direction vector to get the length extent
    l1 = tf.reduce_sum(tf.multiply(vec_mid_p1, vec_dir_norm), axis=1)
    l2 = tf.reduce_sum(tf.multiply(vec_mid_p2, vec_dir_norm), axis=1)
    l3 = tf.reduce_sum(tf.multiply(vec_mid_p3, vec_dir_norm), axis=1)
    l4 = tf.reduce_sum(tf.multiply(vec_mid_p4, vec_dir_norm), axis=1)
    all_lengths = tf.stack([l1, l2, l3, l4], axis=1)

    # NOTE(review): keep_dims is the deprecated TF1 spelling of keepdims;
    # left unchanged for compatibility with the file's TF version.
    min_l = tf.reduce_min(all_lengths, axis=1, keep_dims=True)
    max_l = tf.reduce_max(all_lengths, axis=1, keep_dims=True)
    length_out = max_l - min_l

    # Normal of the direction vector (rotated 90 degrees)
    vec_dir_ortho_norm = tf.stack([-vec_dir_norm[:, 1],
                                   vec_dir_norm[:, 0]], axis=1)

    # Project each corner onto the normal to get the width extent
    w1 = tf.reduce_sum(tf.multiply(vec_mid_p1,
                                    vec_dir_ortho_norm), axis=1)
    w2 = tf.reduce_sum(tf.multiply(vec_mid_p2,
                                    vec_dir_ortho_norm), axis=1)
    w3 = tf.reduce_sum(tf.multiply(vec_mid_p3,
                                    vec_dir_ortho_norm), axis=1)
    w4 = tf.reduce_sum(tf.multiply(vec_mid_p4,
                                    vec_dir_ortho_norm), axis=1)

    all_widths = tf.stack([w1, w2, w3, w4], axis=1)

    min_w = tf.reduce_min(all_widths, axis=1)
    max_w = tf.reduce_max(all_widths, axis=1)
    w_diff = tf.reshape(max_w + min_w, [-1, 1])
    width_out = tf.reshape(max_w - min_w, [-1, 1])

    ry_out = tf.reshape(-tf.atan2(vec_dir[:, 1], vec_dir[:, 0]), [-1, 1])

    # New centroid
    centroid = midpoint +\
        vec_dir_norm * (min_l + max_l) / 2.0 + \
        vec_dir_ortho_norm * w_diff

    return centroid, length_out, width_out, ry_out
def tf_box_4c_to_box_3d(boxes_4c, ground_plane):
"""Vectorized box_4c to box_3d conversion
Args:
boxes_4c: Tensor of boxes_4c (N, 10)
ground_plane: Tensor of ground plane coefficients (4,)
Returns:
Tensor of boxes_3d (N, 7)
"""
format_checker.check_box_4c_format(boxes_4c)
# Extract corners
corners = tf.reshape(boxes_4c[:, 0:8], [-1, 2, 4])
p1 = corners[:, :, 0]
p2 = corners[:, :, 1]
p3 = corners[:, :, 2]
p4 = corners[:, :, 3]
# Get line midpoints
midpoint_12 = (p1 + p2) / 2.0
midpoint_23 = (p2 + p3) / 2.0
midpoint_34 = (p3 + p4) / 2.0
midpoint_14 = (p1 + p4) / 2.0
# Check which direction is longer
vec_34_12 = midpoint_12 - midpoint_34
vec_34_12_mag = tf.norm(vec_34_12, axis=1)
vec_23_14 = midpoint_14 - midpoint_23
vec_23_14_mag = tf.norm(vec_23_14, axis=1)
# Calculate both possibilities (vec_34_12_mag or vec_23_14_mag),
# then mask out the values from the shorter direction
# vec_34_12_mag longer
vec_34_12_centroid, vec_34_12_length, vec_34_12_width, vec_34_12_ry = \
calculate_box_3d_info(vec_34_12, vec_34_12_mag,
p1, p2, p3, p4, midpoint=midpoint_34)
# vec_23_14_mag longer
vec_23_14_centroid, vec_23_14_length, vec_23_14_width, vec_23_14_ry = \
calculate_box_3d_info(vec_23_14, vec_23_14_mag,
p1, p2, p3, p4, midpoint=midpoint_23)
vec_34_12_mask = tf.greater(vec_34_12_mag, vec_23_14_mag)
vec_23_14_mask = tf.logical_not(vec_34_12_mask)
vec_34_12_float_mask = tf.reshape(
tf.cast(vec_34_12_mask, tf.float32), [-1, 1])
vec_23_14_float_mask = tf.reshape(
tf.cast(vec_23_14_mask, tf.float32), [-1, 1])
centroid_xz = vec_34_12_centroid * vec_34_12_float_mask + \
vec_23_14_centroid * vec_23_14_float_mask
length_out = vec_34_12_length * vec_34_12_float_mask + \
vec_23_14_length * vec_23_14_float_mask
width_out = vec_34_12_width * vec_34_12_float_mask + \
vec_23_14_width * vec_23_14_float_mask
ry_out = vec_34_12_ry * vec_34_12_float_mask + \
vec_23_14_ry * vec_23_14_float_mask
# Find new centroid y
a = ground_plane[0]
b = ground_plane[1]
c = ground_plane[2]
d = ground_plane[3]
h1 = boxes_4c[:, 8]
h2 = boxes_4c[:, 9]
centroid_x = centroid_xz[:, 0]
centroid_z = centroid_xz[:, 1]
# Squeeze to single dimension for stacking
length_out = tf.squeeze(length_out)
width_out = tf.squeeze(width_out)
ry_out = tf.squeeze(ry_out)
ground_y = -(a * centroid_x + c * centroid_z + d) / b
# h1 and h2 are along the -y axis
centroid_y = ground_y - h1
height_out = h2 - h1
box_3d_out = tf.stack([centroid_x, centroid_y, centroid_z,
length_out, width_out, height_out, ry_out], axis=1)
return box_3d_out
def tf_box_4c_to_offsets(boxes_4c, box_4c_gt):
"""Calculates box_4c offsets to regress to ground truth
Args:
boxes_4c: boxes_4c to calculate offset for (N, 10)
box_4c_gt: box_4c ground truth to regress to (10,)
Returns:
box_4c offsets (N, 10)
"""
return box_4c_gt - boxes_4c
def tf_offsets_to_box_4c(boxes_4c, offsets):
"""Applies box_4c offsets to boxes_4c
Args:
boxes_4c: boxes_4c to apply offsets to
offsets: box_4c offsets to apply
Returns:
regressed boxes_4c
"""
return boxes_4c + offsets
| [
"tensorflow.round",
"tensorflow.atan2",
"ammf.core.format_checker.check_box_3d_format",
"tensorflow.logical_not",
"tensorflow.multiply",
"numpy.array",
"numpy.arctan2",
"numpy.linalg.norm",
"tensorflow.ones_like",
"ammf.utils.wavedata.tools.core.geometry_utils.calculate_plane_point",
"ammf.core.... | [((748, 790), 'ammf.core.format_checker.check_box_3d_format', 'format_checker.check_box_3d_format', (['box_3d'], {}), '(box_3d)\n', (782, 790), False, 'from ammf.core import format_checker\n'), ((1155, 1217), 'numpy.asarray', 'np.asarray', (['[half_dim_x, half_dim_x, -half_dim_x, -half_dim_x]'], {}), '([half_dim_x, half_dim_x, -half_dim_x, -half_dim_x])\n', (1165, 1217), True, 'import numpy as np\n'), ((1263, 1323), 'numpy.array', 'np.array', (['[half_dim_z, -half_dim_z, -half_dim_z, half_dim_z]'], {}), '([half_dim_z, -half_dim_z, -half_dim_z, half_dim_z])\n', (1271, 1323), True, 'import numpy as np\n'), ((1841, 1865), 'numpy.ones', 'np.ones', (['x_corners.shape'], {}), '(x_corners.shape)\n', (1848, 1865), True, 'import numpy as np\n'), ((1943, 1986), 'numpy.vstack', 'np.vstack', (['[x_corners, z_corners, ones_row]'], {}), '([x_corners, z_corners, ones_row])\n', (1952, 1986), True, 'import numpy as np\n'), ((2001, 2034), 'numpy.matmul', 'np.matmul', (['tr_mat', 'points_stacked'], {}), '(tr_mat, points_stacked)\n', (2010, 2034), True, 'import numpy as np\n'), ((2685, 2729), 'ammf.core.format_checker.check_box_3d_format', 'format_checker.check_box_3d_format', (['boxes_3d'], {}), '(boxes_3d)\n', (2719, 2729), False, 'from ammf.core import format_checker\n'), ((2745, 2789), 'ammf.core.box_3d_encoder.tf_box_3d_to_anchor', 'box_3d_encoder.tf_box_3d_to_anchor', (['boxes_3d'], {}), '(boxes_3d)\n', (2779, 2789), False, 'from ammf.core import box_3d_encoder\n'), ((3102, 3170), 'tensorflow.stack', 'tf.stack', (['[half_dim_x, half_dim_x, -half_dim_x, -half_dim_x]'], {'axis': '(1)'}), '([half_dim_x, half_dim_x, -half_dim_x, -half_dim_x], axis=1)\n', (3110, 3170), True, 'import tensorflow as tf\n'), ((3214, 3282), 'tensorflow.stack', 'tf.stack', (['[half_dim_z, -half_dim_z, -half_dim_z, half_dim_z]'], {'axis': '(1)'}), '([half_dim_z, -half_dim_z, -half_dim_z, half_dim_z], axis=1)\n', (3222, 3282), True, 'import tensorflow as tf\n'), ((3555, 3596), 
'tensorflow.zeros_like', 'tf.zeros_like', (['ry_diffs'], {'dtype': 'tf.float32'}), '(ry_diffs, dtype=tf.float32)\n', (3568, 3596), True, 'import tensorflow as tf\n'), ((3608, 3648), 'tensorflow.ones_like', 'tf.ones_like', (['ry_diffs'], {'dtype': 'tf.float32'}), '(ry_diffs, dtype=tf.float32)\n', (3620, 3648), True, 'import tensorflow as tf\n'), ((4005, 4028), 'tensorflow.ones_like', 'tf.ones_like', (['x_corners'], {}), '(x_corners)\n', (4017, 4028), True, 'import tensorflow as tf\n'), ((4106, 4156), 'tensorflow.stack', 'tf.stack', (['[x_corners, z_corners, ones_row]'], {'axis': '(1)'}), '([x_corners, z_corners, ones_row], axis=1)\n', (4114, 4156), True, 'import tensorflow as tf\n'), ((4171, 4241), 'tensorflow.matmul', 'tf.matmul', (['tr_mat', 'points_stacked'], {'transpose_a': '(True)', 'transpose_b': '(False)'}), '(tr_mat, points_stacked, transpose_a=True, transpose_b=False)\n', (4180, 4241), True, 'import tensorflow as tf\n'), ((4374, 4402), 'tensorflow.reshape', 'tf.reshape', (['corners', '[-1, 8]'], {}), '(corners, [-1, 8])\n', (4384, 4402), True, 'import tensorflow as tf\n'), ((4705, 4728), 'tensorflow.reshape', 'tf.reshape', (['h1', '[-1, 1]'], {}), '(h1, [-1, 1])\n', (4715, 4728), True, 'import tensorflow as tf\n'), ((4746, 4769), 'tensorflow.reshape', 'tf.reshape', (['h2', '[-1, 1]'], {}), '(h2, [-1, 1])\n', (4756, 4769), True, 'import tensorflow as tf\n'), ((4809, 4866), 'tensorflow.concat', 'tf.concat', (['[flat_corners, batched_h1, batched_h2]'], {'axis': '(1)'}), '([flat_corners, batched_h1, batched_h2], axis=1)\n', (4818, 4866), True, 'import tensorflow as tf\n'), ((5488, 5530), 'ammf.core.format_checker.check_box_4c_format', 'format_checker.check_box_4c_format', (['box_4c'], {}), '(box_4c)\n', (5522, 5530), False, 'from ammf.core import format_checker\n'), ((5916, 5941), 'numpy.linalg.norm', 'np.linalg.norm', (['vec_34_12'], {}), '(vec_34_12)\n', (5930, 5941), True, 'import numpy as np\n'), ((6005, 6030), 'numpy.linalg.norm', 'np.linalg.norm', 
(['vec_23_14'], {}), '(vec_23_14)\n', (6019, 6030), True, 'import numpy as np\n'), ((8989, 9082), 'numpy.stack', 'np.stack', (['[centroid_x, centroid_y, centroid_z, length_out, width_out, height_out, ry_out]'], {}), '([centroid_x, centroid_y, centroid_z, length_out, width_out,\n height_out, ry_out])\n', (8997, 9082), True, 'import numpy as np\n'), ((10430, 10464), 'tensorflow.stack', 'tf.stack', (['[l1, l2, l3, l4]'], {'axis': '(1)'}), '([l1, l2, l3, l4], axis=1)\n', (10438, 10464), True, 'import tensorflow as tf\n'), ((10478, 10528), 'tensorflow.reduce_min', 'tf.reduce_min', (['all_lengths'], {'axis': '(1)', 'keep_dims': '(True)'}), '(all_lengths, axis=1, keep_dims=True)\n', (10491, 10528), True, 'import tensorflow as tf\n'), ((10541, 10591), 'tensorflow.reduce_max', 'tf.reduce_max', (['all_lengths'], {'axis': '(1)', 'keep_dims': '(True)'}), '(all_lengths, axis=1, keep_dims=True)\n', (10554, 10591), True, 'import tensorflow as tf\n'), ((10649, 10708), 'tensorflow.stack', 'tf.stack', (['[-vec_dir_norm[:, 1], vec_dir_norm[:, 0]]'], {'axis': '(1)'}), '([-vec_dir_norm[:, 1], vec_dir_norm[:, 0]], axis=1)\n', (10657, 10708), True, 'import tensorflow as tf\n'), ((11205, 11239), 'tensorflow.stack', 'tf.stack', (['[w1, w2, w3, w4]'], {'axis': '(1)'}), '([w1, w2, w3, w4], axis=1)\n', (11213, 11239), True, 'import tensorflow as tf\n'), ((11253, 11286), 'tensorflow.reduce_min', 'tf.reduce_min', (['all_widths'], {'axis': '(1)'}), '(all_widths, axis=1)\n', (11266, 11286), True, 'import tensorflow as tf\n'), ((11299, 11332), 'tensorflow.reduce_max', 'tf.reduce_max', (['all_widths'], {'axis': '(1)'}), '(all_widths, axis=1)\n', (11312, 11332), True, 'import tensorflow as tf\n'), ((11346, 11380), 'tensorflow.reshape', 'tf.reshape', (['(max_w + min_w)', '[-1, 1]'], {}), '(max_w + min_w, [-1, 1])\n', (11356, 11380), True, 'import tensorflow as tf\n'), ((11397, 11431), 'tensorflow.reshape', 'tf.reshape', (['(max_w - min_w)', '[-1, 1]'], {}), '(max_w - min_w, [-1, 1])\n', (11407, 
11431), True, 'import tensorflow as tf\n'), ((11967, 12011), 'ammf.core.format_checker.check_box_4c_format', 'format_checker.check_box_4c_format', (['boxes_4c'], {}), '(boxes_4c)\n', (12001, 12011), False, 'from ammf.core import format_checker\n'), ((12049, 12089), 'tensorflow.reshape', 'tf.reshape', (['boxes_4c[:, 0:8]', '[-1, 2, 4]'], {}), '(boxes_4c[:, 0:8], [-1, 2, 4])\n', (12059, 12089), True, 'import tensorflow as tf\n'), ((12458, 12484), 'tensorflow.norm', 'tf.norm', (['vec_34_12'], {'axis': '(1)'}), '(vec_34_12, axis=1)\n', (12465, 12484), True, 'import tensorflow as tf\n'), ((12548, 12574), 'tensorflow.norm', 'tf.norm', (['vec_23_14'], {'axis': '(1)'}), '(vec_23_14, axis=1)\n', (12555, 12574), True, 'import tensorflow as tf\n'), ((13181, 13221), 'tensorflow.greater', 'tf.greater', (['vec_34_12_mag', 'vec_23_14_mag'], {}), '(vec_34_12_mag, vec_23_14_mag)\n', (13191, 13221), True, 'import tensorflow as tf\n'), ((13243, 13273), 'tensorflow.logical_not', 'tf.logical_not', (['vec_34_12_mask'], {}), '(vec_34_12_mask)\n', (13257, 13273), True, 'import tensorflow as tf\n'), ((14196, 14218), 'tensorflow.squeeze', 'tf.squeeze', (['length_out'], {}), '(length_out)\n', (14206, 14218), True, 'import tensorflow as tf\n'), ((14235, 14256), 'tensorflow.squeeze', 'tf.squeeze', (['width_out'], {}), '(width_out)\n', (14245, 14256), True, 'import tensorflow as tf\n'), ((14270, 14288), 'tensorflow.squeeze', 'tf.squeeze', (['ry_out'], {}), '(ry_out)\n', (14280, 14288), True, 'import tensorflow as tf\n'), ((14461, 14562), 'tensorflow.stack', 'tf.stack', (['[centroid_x, centroid_y, centroid_z, length_out, width_out, height_out, ry_out]'], {'axis': '(1)'}), '([centroid_x, centroid_y, centroid_z, length_out, width_out,\n height_out, ry_out], axis=1)\n', (14469, 14562), True, 'import tensorflow as tf\n'), ((805, 863), 'ammf.core.box_3d_encoder.box_3d_to_anchor', 'box_3d_encoder.box_3d_to_anchor', (['box_3d'], {'ortho_rotate': '(True)'}), '(box_3d, ortho_rotate=True)\n', (836, 863), 
False, 'from ammf.core import box_3d_encoder\n'), ((1439, 1461), 'numpy.round', 'np.round', (['(ry / half_pi)'], {}), '(ry / half_pi)\n', (1447, 1461), True, 'import numpy as np\n'), ((2153, 2239), 'ammf.utils.wavedata.tools.core.geometry_utils.calculate_plane_point', 'geometry_utils.calculate_plane_point', (['ground_plane', '[centroid_x, None, centroid_z]'], {}), '(ground_plane, [centroid_x, None,\n centroid_z])\n', (2189, 2239), False, 'from ammf.utils.wavedata.tools.core import geometry_utils\n'), ((3439, 3466), 'tensorflow.round', 'tf.round', (['(all_rys / half_pi)'], {}), '(all_rys / half_pi)\n', (3447, 3466), True, 'import tensorflow as tf\n'), ((6387, 6424), 'numpy.dot', 'np.dot', (['vec_mid_34_p1', 'vec_34_12_norm'], {}), '(vec_mid_34_p1, vec_34_12_norm)\n', (6393, 6424), True, 'import numpy as np\n'), ((6438, 6475), 'numpy.dot', 'np.dot', (['vec_mid_34_p2', 'vec_34_12_norm'], {}), '(vec_mid_34_p2, vec_34_12_norm)\n', (6444, 6475), True, 'import numpy as np\n'), ((6489, 6526), 'numpy.dot', 'np.dot', (['vec_mid_34_p3', 'vec_34_12_norm'], {}), '(vec_mid_34_p3, vec_34_12_norm)\n', (6495, 6526), True, 'import numpy as np\n'), ((6540, 6577), 'numpy.dot', 'np.dot', (['vec_mid_34_p4', 'vec_34_12_norm'], {}), '(vec_mid_34_p4, vec_34_12_norm)\n', (6546, 6577), True, 'import numpy as np\n'), ((6634, 6654), 'numpy.amin', 'np.amin', (['all_lengths'], {}), '(all_lengths)\n', (6641, 6654), True, 'import numpy as np\n'), ((6671, 6691), 'numpy.amax', 'np.amax', (['all_lengths'], {}), '(all_lengths)\n', (6678, 6691), True, 'import numpy as np\n'), ((6749, 6800), 'numpy.asarray', 'np.asarray', (['[-vec_34_12_norm[1], vec_34_12_norm[0]]'], {}), '([-vec_34_12_norm[1], vec_34_12_norm[0]])\n', (6759, 6800), True, 'import numpy as np\n'), ((6814, 6847), 'numpy.dot', 'np.dot', (['vec_mid_34_p1', 'ortho_norm'], {}), '(vec_mid_34_p1, ortho_norm)\n', (6820, 6847), True, 'import numpy as np\n'), ((6861, 6894), 'numpy.dot', 'np.dot', (['vec_mid_34_p2', 'ortho_norm'], {}), 
'(vec_mid_34_p2, ortho_norm)\n', (6867, 6894), True, 'import numpy as np\n'), ((6908, 6941), 'numpy.dot', 'np.dot', (['vec_mid_34_p3', 'ortho_norm'], {}), '(vec_mid_34_p3, ortho_norm)\n', (6914, 6941), True, 'import numpy as np\n'), ((6955, 6988), 'numpy.dot', 'np.dot', (['vec_mid_34_p4', 'ortho_norm'], {}), '(vec_mid_34_p4, ortho_norm)\n', (6961, 6988), True, 'import numpy as np\n'), ((7044, 7063), 'numpy.amin', 'np.amin', (['all_widths'], {}), '(all_widths)\n', (7051, 7063), True, 'import numpy as np\n'), ((7080, 7099), 'numpy.amax', 'np.amax', (['all_widths'], {}), '(all_widths)\n', (7087, 7099), True, 'import numpy as np\n'), ((7627, 7664), 'numpy.dot', 'np.dot', (['vec_mid_23_p1', 'vec_23_14_norm'], {}), '(vec_mid_23_p1, vec_23_14_norm)\n', (7633, 7664), True, 'import numpy as np\n'), ((7678, 7715), 'numpy.dot', 'np.dot', (['vec_mid_23_p2', 'vec_23_14_norm'], {}), '(vec_mid_23_p2, vec_23_14_norm)\n', (7684, 7715), True, 'import numpy as np\n'), ((7729, 7766), 'numpy.dot', 'np.dot', (['vec_mid_23_p3', 'vec_23_14_norm'], {}), '(vec_mid_23_p3, vec_23_14_norm)\n', (7735, 7766), True, 'import numpy as np\n'), ((7780, 7817), 'numpy.dot', 'np.dot', (['vec_mid_23_p4', 'vec_23_14_norm'], {}), '(vec_mid_23_p4, vec_23_14_norm)\n', (7786, 7817), True, 'import numpy as np\n'), ((7874, 7894), 'numpy.amin', 'np.amin', (['all_lengths'], {}), '(all_lengths)\n', (7881, 7894), True, 'import numpy as np\n'), ((7911, 7931), 'numpy.amax', 'np.amax', (['all_lengths'], {}), '(all_lengths)\n', (7918, 7931), True, 'import numpy as np\n'), ((7990, 8041), 'numpy.asarray', 'np.asarray', (['[-vec_23_14_norm[1], vec_23_14_norm[0]]'], {}), '([-vec_23_14_norm[1], vec_23_14_norm[0]])\n', (8000, 8041), True, 'import numpy as np\n'), ((8055, 8088), 'numpy.dot', 'np.dot', (['vec_mid_23_p1', 'ortho_norm'], {}), '(vec_mid_23_p1, ortho_norm)\n', (8061, 8088), True, 'import numpy as np\n'), ((8102, 8135), 'numpy.dot', 'np.dot', (['vec_mid_23_p2', 'ortho_norm'], {}), '(vec_mid_23_p2, ortho_norm)\n', 
(8108, 8135), True, 'import numpy as np\n'), ((8149, 8182), 'numpy.dot', 'np.dot', (['vec_mid_23_p3', 'ortho_norm'], {}), '(vec_mid_23_p3, ortho_norm)\n', (8155, 8182), True, 'import numpy as np\n'), ((8196, 8229), 'numpy.dot', 'np.dot', (['vec_mid_23_p4', 'ortho_norm'], {}), '(vec_mid_23_p4, ortho_norm)\n', (8202, 8229), True, 'import numpy as np\n'), ((8285, 8304), 'numpy.amin', 'np.amin', (['all_widths'], {}), '(all_widths)\n', (8292, 8304), True, 'import numpy as np\n'), ((8321, 8340), 'numpy.amax', 'np.amax', (['all_widths'], {}), '(all_widths)\n', (8328, 8340), True, 'import numpy as np\n'), ((9973, 10005), 'tensorflow.reshape', 'tf.reshape', (['vec_dir_mag', '[-1, 1]'], {}), '(vec_dir_mag, [-1, 1])\n', (9983, 10005), True, 'import tensorflow as tf\n'), ((10155, 10192), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p1', 'vec_dir_norm'], {}), '(vec_mid_p1, vec_dir_norm)\n', (10166, 10192), True, 'import tensorflow as tf\n'), ((10225, 10262), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p2', 'vec_dir_norm'], {}), '(vec_mid_p2, vec_dir_norm)\n', (10236, 10262), True, 'import tensorflow as tf\n'), ((10295, 10332), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p3', 'vec_dir_norm'], {}), '(vec_mid_p3, vec_dir_norm)\n', (10306, 10332), True, 'import tensorflow as tf\n'), ((10365, 10402), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p4', 'vec_dir_norm'], {}), '(vec_mid_p4, vec_dir_norm)\n', (10376, 10402), True, 'import tensorflow as tf\n'), ((10767, 10810), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p1', 'vec_dir_ortho_norm'], {}), '(vec_mid_p1, vec_dir_ortho_norm)\n', (10778, 10810), True, 'import tensorflow as tf\n'), ((10878, 10921), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p2', 'vec_dir_ortho_norm'], {}), '(vec_mid_p2, vec_dir_ortho_norm)\n', (10889, 10921), True, 'import tensorflow as tf\n'), ((10989, 11032), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p3', 'vec_dir_ortho_norm'], {}), '(vec_mid_p3, vec_dir_ortho_norm)\n', 
(11000, 11032), True, 'import tensorflow as tf\n'), ((11100, 11143), 'tensorflow.multiply', 'tf.multiply', (['vec_mid_p4', 'vec_dir_ortho_norm'], {}), '(vec_mid_p4, vec_dir_ortho_norm)\n', (11111, 11143), True, 'import tensorflow as tf\n'), ((13322, 13357), 'tensorflow.cast', 'tf.cast', (['vec_34_12_mask', 'tf.float32'], {}), '(vec_34_12_mask, tf.float32)\n', (13329, 13357), True, 'import tensorflow as tf\n'), ((13415, 13450), 'tensorflow.cast', 'tf.cast', (['vec_23_14_mask', 'tf.float32'], {}), '(vec_23_14_mask, tf.float32)\n', (13422, 13450), True, 'import tensorflow as tf\n'), ((3908, 3946), 'tensorflow.stack', 'tf.stack', (['[zeros, zeros, ones]'], {'axis': '(1)'}), '([zeros, zeros, ones], axis=1)\n', (3916, 3946), True, 'import tensorflow as tf\n'), ((7184, 7222), 'numpy.arctan2', 'np.arctan2', (['vec_34_12[1]', 'vec_34_12[0]'], {}), '(vec_34_12[1], vec_34_12[0])\n', (7194, 7222), True, 'import numpy as np\n'), ((8425, 8463), 'numpy.arctan2', 'np.arctan2', (['vec_23_14[1]', 'vec_23_14[0]'], {}), '(vec_23_14[1], vec_23_14[0])\n', (8435, 8463), True, 'import numpy as np\n'), ((11458, 11496), 'tensorflow.atan2', 'tf.atan2', (['vec_dir[:, 1]', 'vec_dir[:, 0]'], {}), '(vec_dir[:, 1], vec_dir[:, 0])\n', (11466, 11496), True, 'import tensorflow as tf\n'), ((1647, 1662), 'numpy.cos', 'np.cos', (['ry_diff'], {}), '(ry_diff)\n', (1653, 1662), True, 'import numpy as np\n'), ((1664, 1679), 'numpy.sin', 'np.sin', (['ry_diff'], {}), '(ry_diff)\n', (1670, 1679), True, 'import numpy as np\n'), ((1736, 1751), 'numpy.cos', 'np.cos', (['ry_diff'], {}), '(ry_diff)\n', (1742, 1751), True, 'import numpy as np\n'), ((1719, 1734), 'numpy.sin', 'np.sin', (['ry_diff'], {}), '(ry_diff)\n', (1725, 1734), True, 'import numpy as np\n'), ((3763, 3779), 'tensorflow.cos', 'tf.cos', (['ry_diffs'], {}), '(ry_diffs)\n', (3769, 3779), True, 'import tensorflow as tf\n'), ((3781, 3797), 'tensorflow.sin', 'tf.sin', (['ry_diffs'], {}), '(ry_diffs)\n', (3787, 3797), True, 'import tensorflow as tf\n'), 
((3859, 3875), 'tensorflow.cos', 'tf.cos', (['ry_diffs'], {}), '(ry_diffs)\n', (3865, 3875), True, 'import tensorflow as tf\n'), ((3841, 3857), 'tensorflow.sin', 'tf.sin', (['ry_diffs'], {}), '(ry_diffs)\n', (3847, 3857), True, 'import tensorflow as tf\n')] |
# Copyright (C) 2020 Zurich Instruments
#
# This software may be modified and distributed under the terms
# of the MIT license. See the LICENSE file for details.
import numpy as np
import matplotlib.pyplot as plt
import textwrap
import time
def write_crosstalk_matrix(daq, device, matrix):
    """
    Write the given matrix to the QA Setup crosstalk matrix of the UHFQA.

    Arguments:
        daq (zhinst.ziDAQServer) -- Connection to the Data Server
        device (String) -- device ID, e.g. "dev2266"
        matrix (2D array) -- crosstalk matrix to be written to the QA Setup tab
    """
    # np.ndenumerate walks the matrix in row-major order, yielding one
    # (row, col) index pair and value per element.
    for (row, col), value in np.ndenumerate(matrix):
        daq.setDouble(f"/{device}/qas/0/crosstalk/rows/{row}/cols/{col}", value)
def sequence_multiplexed_readout(
    channels,
    frequencies,
    n_averages,
    state=None,
):
    """
    Returns an AWG sequence program (String) that specifies
    the sequence for multiplexed readout. Amplitudes and phases
    are hardcoded in the function for up to 10 channels and for
    ground and excited qubit states (simulated response of a
    readout resonator for qubit in either ground or excited state).

    Arguments:
        channels (int) -- indices of channels to create readout pulses for
        frequencies (float) -- frequencies (in Hz) of readout pulses,
            indexed by channel number; negative values are used as their
            absolute value
        n_averages (int) -- number of repetitions

    Keyword Arguments:
        state (int) -- states of measured channels to be simulated, 0 or 1
            (default: all ground state)

    Returns:
        (String) -- awg sequence program as string
    """
    # hard coded pulse parameters: base amplitudes/phases for the ground
    # state, deltas describe the shift when the qubit is excited
    amplitudes = np.array([0.13, 0.15, 0.16, 0.15, 0.14, 0.13, 0.17, 0.23, 0.19, 0.11])/20
    phases = np.zeros(10)
    deltas_amplitude = np.array([0.02, 0.01, -0.01, 0.02, -0.012, 0.0, 0.02, -0.012, 0.06, 0.03])
    deltas_phase = np.array([0.23, 0.31, 0.26, -0.171, 0.28, -0.31, 0.19, -0.21, 0.091, 0.29]) * np.pi /4
    n_channels = len(channels)
    # channel indices index into `frequencies`, so the list must cover the
    # highest index (fixes former off-by-one: `>=` allowed an IndexError)
    assert len(frequencies) > max(channels), "Not enough readout frequencies specified!"
    if state is None:
        state = [0] * n_channels
    # work on a copy so the caller's frequency list is never mutated in place
    frequencies = list(frequencies)
    for i, ch in enumerate(channels):
        if frequencies[ch] < 0:
            frequencies[ch] = abs(frequencies[ch])
        if state[i]:
            # excited state: scale the amplitude and shift the phase
            amplitudes[ch] = amplitudes[ch] * (1 + deltas_amplitude[ch])
            phases[ch] += deltas_phase[ch]
    # text snippet for the initialization of awg sequence
    awg_program_init = textwrap.dedent(
        """\
        const samplingRate = 1.8e9;
        // parameters for envelope
        const riseTime = 30e-9;
        const fallTime = 30e-9;
        const flatTime = 200e-9;
        const rise = riseTime * samplingRate;
        const fall = fallTime * samplingRate;
        const length = flatTime * samplingRate;
        const totalLength = rise + length + fall;
        // define waveforms
        wave w_gauss_rise = gauss(2*rise, rise, rise/4);
        wave w_gauss_fall = gauss(2*fall, fall, fall/4);
        wave w_rise = cut(w_gauss_rise, 0, rise);
        wave w_fall = cut(w_gauss_fall, fall, 2*fall-1);
        wave w_flat = rect(length, 1.0);
        wave w_pad = zeros((totalLength-1)%16);
        // combine to total envelope
        wave readoutPulse = 1.0*join(w_rise, w_flat, w_fall, w_pad) + 0.0* w_gauss_rise;
        // init empty final waveforms
        wave w_I = zeros(totalLength);
        wave w_Q = zeros(totalLength);
        """
    )
    # text snippet for single pulse; *N* and the _..._ markers are
    # placeholders substituted per channel below
    awg_program_singlePulse = textwrap.dedent(
        """\
        // modulate envelope for readout pulse *N*
        const f*N*_readout = _Frequency*N*_ ;
        wave w*N*_I = _Amplitude*N*_ * readoutPulse * cosine(totalLength, 1, _Phase*N*_, f*N*_readout*totalLength/samplingRate);
        wave w*N*_Q = _Amplitude*N*_ * readoutPulse * sine(totalLength, 1, _Phase*N*_, f*N*_readout*totalLength/samplingRate);
        w_I = add(w_I, w*N*_I);
        w_Q = add(w_Q, w*N*_Q);
        """
    )
    # text snippet for main loop of .seqC
    awg_program_playWave = textwrap.dedent(
        """\
        // play waveform
        setTrigger(AWG_INTEGRATION_ARM);
        var result_averages = _nAverages_ ;
        repeat (result_averages) {
        playWave(w_I, w_Q);
        setTrigger(AWG_INTEGRATION_ARM + AWG_INTEGRATION_TRIGGER + AWG_MONITOR_TRIGGER + 1);
        setTrigger(AWG_INTEGRATION_ARM);
        waitWave();
        wait(1024);
        }
        setTrigger(0);
        """
    )
    # instantiate the pulse snippet once per readout channel
    awg_program_pulses = ""
    for ch in channels:
        awg_program_pulses = awg_program_pulses + awg_program_singlePulse.replace(
            "*N*", str(ch)
        )
    # substitute the per-channel pulse parameters
    # (the former `_nChannels_` replacement was dropped: no template
    # contains that placeholder, so it was dead code)
    for ch in channels:
        awg_program_pulses = awg_program_pulses.replace(
            f"_Frequency{ch}_", str(frequencies[ch])
        )
        awg_program_pulses = awg_program_pulses.replace(
            f"_Amplitude{ch}_", str(amplitudes[ch])
        )
        awg_program_pulses = awg_program_pulses.replace(
            f"_Phase{ch}_", str(phases[ch])
        )
    awg_program_playWave = awg_program_playWave.replace("_nAverages_", str(n_averages))
    return awg_program_init + awg_program_pulses + awg_program_playWave
def compile_sequence(awg_module, awg_program):
    """
    Starts compilation of an AWG sequence program and loads it to the device.

    Blocks until the compiler finishes and raises an AssertionError carrying
    the compiler's status string if compilation fails.

    Arguments:
        awg_module (awgModule) -- awgModule Object of AWG
        awg_program (String) -- specifies the awg sequence in .seqC format
    """
    awg_module.set("compiler/sourcestring", awg_program)
    # A status of -1 means the compiler is still busy; poll until it is done.
    while awg_module.getInt("compiler/status") == -1:
        time.sleep(0.1)
    # A status of 1 signals a compilation error; the assertion message
    # surfaces the compiler's diagnostic string (only fetched on failure).
    assert awg_module.getInt("compiler/status") != 1, awg_module.getString(
        "/compiler/statusstring"
    )
    # NOTE(review): uses .get() here while the checks above use .getInt();
    # presumably both return the integer status (0 == success) -- confirm
    # against the zhinst awgModule API.
    if awg_module.get("compiler/status") == 0:
        print("Compilation successful!")
def generate_demod_weights(length, frequency, samplingRate=1.8e9, plot=False, phase=0):
    """
    Generate a sampled sine wave used as demodulation weights.

    Arguments:
        length (int) -- number of samples, at most 4096
        frequency (float) -- demodulation frequency in Hz, must be positive

    Keyword Arguments:
        samplingRate (float) -- sampling rate in Hz (default: 1.8e9)
        plot (bool) -- unused; kept for backward compatibility
            (default: False)
        phase (float) -- phase offset in radians (default: 0)

    Returns:
        (ndarray) -- sine wave of the given length:
            sin(2*pi*frequency*t/samplingRate + phase)
    """
    assert length <= 4096
    assert frequency > 0
    samples = np.arange(0, length)
    return np.sin(2 * np.pi * frequency * samples / samplingRate + phase)
def run_awg(daq, device):
    """
    Runs the AWG sequence: puts the AWG into single-shot mode and enables it.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"
    """
    awg_base = f"/{device}/awgs/0"
    # single-shot mode may be set asynchronously; the enable is synchronous
    daq.asyncSetInt(awg_base + "/single", 1)
    daq.syncSetInt(awg_base + "/enable", 1)
def toggle_outputs(daq, device, channel=None):
    """
    Toggles signal outputs of the UHFQA. If no channel is specified,
    both outputs are toggled.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"

    Keyword Arguments:
        channel (int) -- index of the channel to be toggled, 0 or 1;
            if None, both channels are toggled (default: None)
    """
    if channel is not None:
        assert channel in [0, 1]
        channel = [channel]
    else:
        channel = [0, 1]
    for ch in channel:
        path = f"/{device}/sigouts/{ch}/on"
        # Read the state once (the original queried the device twice, which
        # could race if the value changed between reads).
        current = daq.getInt(path)
        if current == 0:
            daq.setInt(path, 1)
        elif current == 1:
            daq.setInt(path, 0)
def set_integration_weights(
    daq,
    device,
    weights,
    channel,
    quadrature="real",
    demod_frequency=None
):
    """
    Sets the integration weights of the UHFQA. The input signals
    are multiplied with the integration weights for each channel.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"
        weights (double) -- list of doubles describing the integration
            weights to be set, max. length is 4096; a single value is
            expanded to a constant weight over the full monitor length
        channel (int) -- index of channel to set weights of

    Keyword Arguments:
        quadrature (str) -- quadrature of weights to be set,
            either 'imag' or 'real' (default: 'real')
        demod_frequency (double) -- frequency for digital demodulation
            (default: None)
    """
    monitor_length = daq.getInt(f"/{device}/qas/0/monitor/length")
    assert channel in range(10)
    assert quadrature in ["real", "imag"]
    # if weight is only one point, set constant weight for total length
    if len(weights) == 1:
        weights = weights * np.ones(monitor_length)
    # Measure the length AFTER the possible expansion above; previously it
    # was taken before, so a single-point weight left the device integration
    # length at 1 while a full-length vector was written.
    integration_length = len(weights)
    assert integration_length <= 4096
    # truncate the weights to the monitor length if they are longer
    if integration_length > monitor_length:
        weights = weights[:monitor_length]
        integration_length = monitor_length
    # generate weights for digital demodulation
    if demod_frequency is not None:
        demod_weights = generate_demod_weights(integration_length, demod_frequency)
    else:
        demod_weights = np.ones(integration_length)
    integration_weights = weights * demod_weights
    # reset: full length, all-zero weights
    daq.setInt(f"/{device}/qas/0/integration/length", 4096)
    daq.setVector(
        f"/{device}/qas/0/integration/weights/{channel}/{quadrature}",
        np.zeros(4096),
    )
    # set the new length and weights
    daq.setInt(f"/{device}/qas/0/integration/length", integration_length)
    daq.setVector(
        f"/{device}/qas/0/integration/weights/{channel}/{quadrature}",
        integration_weights,
    )
def reset_integration_weights(daq, device, channels=range(10)):
    """
    Resets the integration weights of the UHFQA to all zeros.
    If no channels are specified, all ten are reset.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"

    Keyword Arguments:
        channels (int) -- list of indices of channels to be reset
            (default: range(10))
    """
    # the weight memory is always written at its full length of 4096 samples
    zero_weights = np.zeros(4096)
    daq.setInt(f"/{device}/qas/0/integration/length", 4096)
    for ch in channels:
        for quadrature in ("real", "imag"):
            daq.setVector(
                f"/{device}/qas/0/integration/weights/{ch}/{quadrature}",
                zero_weights,
            )
def set_qa_results(daq, device, result_length, result_averages, source="integration"):
    """
    Applies settings to the QA Results tab.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"
        result_length (int) -- number of samples to be recorded
        result_averages (int) -- number of averages for results

    Keyword Arguments:
        source (str) -- specifies data source of QA:
            "integration", "rotation" or "threshold"
            (default: "integration")
    """
    # map the named source to its device enumeration value; any other value
    # is passed through to the device unchanged
    source = {"integration": 7, "rotation": 2, "threshold": 1}.get(source, source)
    settings = [
        ("qas/0/result/enable", 0),
        ("qas/0/result/reset", 1),
        ("qas/0/result/length", result_length),
        ("qas/0/result/averages", result_averages),
        ("qas/0/result/source", source),
        ("qas/0/result/enable", 1),
    ]
    daq.set([(f"/{device}/{node}", value) for node, value in settings])
def set_qa_monitor(daq, device, monitor_length, averages):
    """
    Applies settings to the QA Monitor tab.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"
        monitor_length (int) -- number of samples recorded in monitor tab
        averages (int) -- number of averages for monitor tab
    """
    prefix = f"/{device}/qas/0/monitor"
    # disable, reset, reconfigure, then re-enable in a single transaction
    daq.set([
        (f"{prefix}/enable", 0),
        (f"{prefix}/reset", 1),
        (f"{prefix}/length", monitor_length),
        (f"{prefix}/averages", averages),
        (f"{prefix}/enable", 1),
    ])
def optimal_integration_weights(
    daq,
    device,
    awg,
    channel,
    frequencies,
    plot=False,
    delay=None,
):
    """
    Sets the optimal integration weights for the specified channel.
    Measures IQ traces for the channel being in ground/excited state
    and takes the (normalized) difference as optimal weights.

    Arguments:
        daq (zhinst.ziDAQServer) -- Data Server Object
        device (String) -- device ID, e.g. "dev2266"
        awg (awgModule) -- awgModule() Object of AWG
        channel (int) -- index of channel to set weights for
        frequencies (float) -- list of readout frequencies for all channels

    Keyword Arguments:
        plot (bool) -- if set, detailed plots are shown (default: False)
        delay (int) -- number of samples at the beginning of weights array
            that are set to 0 (default: None)
    """
    daq.flush()
    # read back the monitor configuration that was set up beforehand
    monitor_length = daq.getInt(f"/{device}/qas/0/monitor/length")
    monitor_averages = daq.getInt(f"/{device}/qas/0/monitor/averages")
    # clear any previous weights on this channel before measuring
    reset_integration_weights(daq, device, channels=[channel])
    monitor_paths = [
        f"/{device}/qas/0/monitor/inputs/0/wave",
        f"/{device}/qas/0/monitor/inputs/1/wave",
    ]
    monitor_list = []
    ground_state = [0]
    excited_state = [1]
    # acquire one averaged I/Q trace per simulated qubit state
    for state in [ground_state, excited_state]:
        print(f"Channel {channel} in state |{','.join(str(num) for num in state)}>", flush=True)
        # set up AWG sequence program
        # readout pulse for only single channel!
        awg_program = sequence_multiplexed_readout(
            [channel],
            frequencies,
            monitor_averages,
            state=state
        )
        compile_sequence(awg, awg_program)
        # ensure all settings are synced and subscribe to monitor paths
        daq.sync()
        time.sleep(0.1)
        daq.subscribe(monitor_paths)
        # run AWG sequence and start acquisition
        run_awg(daq, device)
        # poll data
        monitor_list.append(acquisition_poll(daq, monitor_paths, monitor_length))
        print("\t\t--> Data acquired")
        # unsubscribe immediately after acquisition!
        daq.unsubscribe(monitor_paths)
    # Flatten the two poll results into [I_ground, Q_ground, I_excited,
    # Q_excited].
    # NOTE(review): the I/Q assignment relies on the dict preserving the
    # subscription order of the two monitor paths -- confirm this holds for
    # the poll result returned by acquisition_poll.
    waves = []
    for polldata in monitor_list:
        for path, data in polldata.items():
            waves.append(data)
    wave_I_0 = waves[0]
    wave_Q_0 = waves[1]
    wave_I_1 = waves[2]
    wave_Q_1 = waves[3]
    # optimal weights: difference between excited- and ground-state traces
    weights_I = wave_I_1 - wave_I_0
    weights_Q = wave_Q_1 - wave_Q_0
    # normalize each quadrature to a peak magnitude of 1
    # NOTE(review): divides by the max absolute value -- this would divide
    # by zero if the two traces were identical.
    weights_I = weights_I / np.max(np.abs(weights_I))
    weights_Q = weights_Q / np.max(np.abs(weights_Q))
    # optionally blank the first `delay` samples (e.g. propagation delay)
    if delay is not None:
        weights_I[:delay] = 0
        weights_Q[:delay] = 0
    # upload the computed weights to the device
    set_integration_weights(
        daq, device, weights_I, channel, quadrature="real"
    )
    set_integration_weights(
        daq, device, weights_Q, channel, quadrature="imag"
    )
    if plot:
        # set up plot: raw traces for both states plus the resulting weights
        fig, (ax1, ax2, ax3) = plt.subplots(3, figsize=[10, 8])
        ax1.grid("on")
        ax1.plot(wave_I_0, label="Input I", color=plt.cm.tab20(0))
        ax1.plot(wave_Q_0, label="Input Q", color=plt.cm.tab20(1))
        ax1.legend(frameon=False, loc=3)
        ax1.set_title("Qubit in ground state", position=[0.15, 0.7])
        ax1.set_xlim([0, monitor_length])
        ax2.grid("on")
        ax2.plot(wave_I_1, label="Input I", color=plt.cm.tab20(0))
        ax2.plot(wave_Q_1, label="Input Q", color=plt.cm.tab20(1))
        ax2.legend(frameon=False, loc=3)
        ax2.set_title("Qubit in excited state", position=[0.15, 0.7])
        ax2.set_xlim([0, monitor_length])
        ax3.axvline(delay, c="k", linewidth=0.5)
        ax3.grid("on")
        ax3.plot(weights_I, label="Weight I", color=plt.cm.tab20(0))
        ax3.plot(weights_Q, label="Weight Q", color=plt.cm.tab20(1))
        ax3.legend(frameon=False, loc=3)
        ax3.set_title("Integration weights for I/Q", position=[0.15, 0.7])
        ax3.set_xlim([0, monitor_length])
        ax3.set_xlabel("Samples")
        plt.show()
def acquisition_poll(daq, paths, num_samples, timeout=10.0):
    """Poll the UHFQA for data. Taken from zhinst.examples.uhfqa.common

    Repeatedly polls ``daq`` until every subscribed path has delivered at
    least ``num_samples`` samples, or the ``timeout`` budget is exhausted.

    Args:
        daq: data server session object providing ``poll()`` (ziDAQServer).
        paths (list): list of subscribed paths
        num_samples (int): expected number of samples
        timeout (float): time in seconds before timeout Error is raised.

    Returns:
        dict: maps each path to one concatenated numpy array of its samples.

    Raises:
        Exception: if not all paths delivered ``num_samples`` within ``timeout``.
    """
    poll_length = 0.001  # s
    poll_timeout = 500  # ms
    poll_flags = 0
    poll_return_flat_dict = True
    # Keep list of recorded chunks of data for each subscribed path
    chunks = {p: [] for p in paths}
    gotem = {p: False for p in paths}
    # Poll data.  Renamed the loop counter from `time` to `elapsed`: the
    # original name shadowed the `time` module imported at file level.
    # NOTE: `elapsed` accumulates requested poll time, not wall-clock time.
    elapsed = 0.0
    while elapsed < timeout and not all(gotem.values()):
        dataset = daq.poll(poll_length, poll_timeout, poll_flags, poll_return_flat_dict)
        for p in paths:
            if p not in dataset:
                continue
            for v in dataset[p]:
                chunks[p].append(v['vector'])
            # Check completeness once per poll instead of once per chunk;
            # num_obtained only grows, so the end state is identical.
            num_obtained = sum(len(x) for x in chunks[p])
            if num_obtained >= num_samples:
                gotem[p] = True
        elapsed += poll_length
    if not all(gotem.values()):
        for p in paths:
            num_obtained = sum(len(x) for x in chunks[p])
            print('Path {}: Got {} of {} samples'.format(p, num_obtained, num_samples))
        raise Exception('Timeout Error: Did not get all results within {:.1f} s!'.format(timeout))
    # Return dict of flattened data
    return {p: np.concatenate(v) for p, v in chunks.items()}
# Fixed: the guard compared __name__ against the literal "__name__" (a typo),
# which can never be true -- a module's __name__ is "__main__" when run as a
# script, or its import name otherwise.
if __name__ == "__main__":
    pass
| [
"textwrap.dedent",
"numpy.abs",
"numpy.ones",
"matplotlib.pyplot.cm.tab20",
"time.sleep",
"numpy.array",
"numpy.zeros",
"numpy.concatenate",
"numpy.sin",
"matplotlib.pyplot.subplots",
"numpy.arange",
"matplotlib.pyplot.show"
] | [((1822, 1834), 'numpy.zeros', 'np.zeros', (['(10)'], {}), '(10)\n', (1830, 1834), True, 'import numpy as np\n'), ((1858, 1932), 'numpy.array', 'np.array', (['[0.02, 0.01, -0.01, 0.02, -0.012, 0.0, 0.02, -0.012, 0.06, 0.03]'], {}), '([0.02, 0.01, -0.01, 0.02, -0.012, 0.0, 0.02, -0.012, 0.06, 0.03])\n', (1866, 1932), True, 'import numpy as np\n'), ((2609, 3588), 'textwrap.dedent', 'textwrap.dedent', (['""" const samplingRate = 1.8e9;\n // parameters for envelope\n const riseTime = 30e-9;\n const fallTime = 30e-9;\n const flatTime = 200e-9;\n const rise = riseTime * samplingRate;\n const fall = fallTime * samplingRate;\n const length = flatTime * samplingRate;\n const totalLength = rise + length + fall;\n // define waveforms\n wave w_gauss_rise = gauss(2*rise, rise, rise/4);\n wave w_gauss_fall = gauss(2*fall, fall, fall/4);\n wave w_rise = cut(w_gauss_rise, 0, rise);\n wave w_fall = cut(w_gauss_fall, fall, 2*fall-1);\n wave w_flat = rect(length, 1.0); \n wave w_pad = zeros((totalLength-1)%16);\n // combine to total envelope\n wave readoutPulse = 1.0*join(w_rise, w_flat, w_fall, w_pad) + 0.0* w_gauss_rise;\n\n // init empty final waveforms\n wave w_I = zeros(totalLength);\n wave w_Q = zeros(totalLength);\n\n """'], {}), '(\n """ const samplingRate = 1.8e9;\n // parameters for envelope\n const riseTime = 30e-9;\n const fallTime = 30e-9;\n const flatTime = 200e-9;\n const rise = riseTime * samplingRate;\n const fall = fallTime * samplingRate;\n const length = flatTime * samplingRate;\n const totalLength = rise + length + fall;\n // define waveforms\n wave w_gauss_rise = gauss(2*rise, rise, rise/4);\n wave w_gauss_fall = gauss(2*fall, fall, fall/4);\n wave w_rise = cut(w_gauss_rise, 0, rise);\n wave w_fall = cut(w_gauss_fall, fall, 2*fall-1);\n wave w_flat = rect(length, 1.0); \n wave w_pad = zeros((totalLength-1)%16);\n // combine to total envelope\n wave readoutPulse = 1.0*join(w_rise, w_flat, w_fall, w_pad) + 0.0* w_gauss_rise;\n\n // init empty final waveforms\n 
wave w_I = zeros(totalLength);\n wave w_Q = zeros(totalLength);\n\n """\n )\n', (2624, 3588), False, 'import textwrap\n'), ((3662, 4118), 'textwrap.dedent', 'textwrap.dedent', (['""" // modulate envelope for readout pulse *N*\n const f*N*_readout = _Frequency*N*_ ;\n wave w*N*_I = _Amplitude*N*_ * readoutPulse * cosine(totalLength, 1, _Phase*N*_, f*N*_readout*totalLength/samplingRate);\n wave w*N*_Q = _Amplitude*N*_ * readoutPulse * sine(totalLength, 1, _Phase*N*_, f*N*_readout*totalLength/samplingRate);\n w_I = add(w_I, w*N*_I);\n w_Q = add(w_Q, w*N*_Q);\n\n """'], {}), '(\n """ // modulate envelope for readout pulse *N*\n const f*N*_readout = _Frequency*N*_ ;\n wave w*N*_I = _Amplitude*N*_ * readoutPulse * cosine(totalLength, 1, _Phase*N*_, f*N*_readout*totalLength/samplingRate);\n wave w*N*_Q = _Amplitude*N*_ * readoutPulse * sine(totalLength, 1, _Phase*N*_, f*N*_readout*totalLength/samplingRate);\n w_I = add(w_I, w*N*_I);\n w_Q = add(w_Q, w*N*_Q);\n\n """\n )\n', (3677, 4118), False, 'import textwrap\n'), ((4195, 4633), 'textwrap.dedent', 'textwrap.dedent', (['""" // play waveform\n setTrigger(AWG_INTEGRATION_ARM);\n var result_averages = _nAverages_ ;\n repeat (result_averages) {\n playWave(w_I, w_Q);\n setTrigger(AWG_INTEGRATION_ARM + AWG_INTEGRATION_TRIGGER + AWG_MONITOR_TRIGGER + 1);\n setTrigger(AWG_INTEGRATION_ARM);\n waitWave();\n wait(1024);\n }\n setTrigger(0);\n\n """'], {}), '(\n """ // play waveform\n setTrigger(AWG_INTEGRATION_ARM);\n var result_averages = _nAverages_ ;\n repeat (result_averages) {\n playWave(w_I, w_Q);\n setTrigger(AWG_INTEGRATION_ARM + AWG_INTEGRATION_TRIGGER + AWG_MONITOR_TRIGGER + 1);\n setTrigger(AWG_INTEGRATION_ARM);\n waitWave();\n wait(1024);\n }\n setTrigger(0);\n\n """\n )\n', (4210, 4633), False, 'import textwrap\n'), ((6374, 6394), 'numpy.arange', 'np.arange', (['(0)', 'length'], {}), '(0, length)\n', (6383, 6394), True, 'import numpy as np\n'), ((6403, 6459), 'numpy.sin', 'np.sin', (['(2 * np.pi * frequency * x / 
samplingRate + phase)'], {}), '(2 * np.pi * frequency * x / samplingRate + phase)\n', (6409, 6459), True, 'import numpy as np\n'), ((1735, 1805), 'numpy.array', 'np.array', (['[0.13, 0.15, 0.16, 0.15, 0.14, 0.13, 0.17, 0.23, 0.19, 0.11]'], {}), '([0.13, 0.15, 0.16, 0.15, 0.14, 0.13, 0.17, 0.23, 0.19, 0.11])\n', (1743, 1805), True, 'import numpy as np\n'), ((6006, 6021), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (6016, 6021), False, 'import time\n'), ((9242, 9269), 'numpy.ones', 'np.ones', (['integration_length'], {}), '(integration_length)\n', (9249, 9269), True, 'import numpy as np\n'), ((9515, 9529), 'numpy.zeros', 'np.zeros', (['(4096)'], {}), '(4096)\n', (9523, 9529), True, 'import numpy as np\n'), ((14142, 14157), 'time.sleep', 'time.sleep', (['(0.1)'], {}), '(0.1)\n', (14152, 14157), False, 'import time\n'), ((15254, 15286), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)'], {'figsize': '[10, 8]'}), '(3, figsize=[10, 8])\n', (15266, 15286), True, 'import matplotlib.pyplot as plt\n'), ((16328, 16338), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (16336, 16338), True, 'import matplotlib.pyplot as plt\n'), ((17788, 17805), 'numpy.concatenate', 'np.concatenate', (['v'], {}), '(v)\n', (17802, 17805), True, 'import numpy as np\n'), ((1953, 2028), 'numpy.array', 'np.array', (['[0.23, 0.31, 0.26, -0.171, 0.28, -0.31, 0.19, -0.21, 0.091, 0.29]'], {}), '([0.23, 0.31, 0.26, -0.171, 0.28, -0.31, 0.19, -0.21, 0.091, 0.29])\n', (1961, 2028), True, 'import numpy as np\n'), ((8749, 8772), 'numpy.ones', 'np.ones', (['monitor_length'], {}), '(monitor_length)\n', (8756, 8772), True, 'import numpy as np\n'), ((10383, 10397), 'numpy.zeros', 'np.zeros', (['(4096)'], {}), '(4096)\n', (10391, 10397), True, 'import numpy as np\n'), ((10506, 10520), 'numpy.zeros', 'np.zeros', (['(4096)'], {}), '(4096)\n', (10514, 10520), True, 'import numpy as np\n'), ((14838, 14855), 'numpy.abs', 'np.abs', (['weights_I'], {}), '(weights_I)\n', (14844, 14855), True, 
'import numpy as np\n'), ((14892, 14909), 'numpy.abs', 'np.abs', (['weights_Q'], {}), '(weights_Q)\n', (14898, 14909), True, 'import numpy as np\n'), ((15361, 15376), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(0)'], {}), '(0)\n', (15373, 15376), True, 'import matplotlib.pyplot as plt\n'), ((15428, 15443), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(1)'], {}), '(1)\n', (15440, 15443), True, 'import matplotlib.pyplot as plt\n'), ((15671, 15686), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(0)'], {}), '(0)\n', (15683, 15686), True, 'import matplotlib.pyplot as plt\n'), ((15738, 15753), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(1)'], {}), '(1)\n', (15750, 15753), True, 'import matplotlib.pyplot as plt\n'), ((16033, 16048), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(0)'], {}), '(0)\n', (16045, 16048), True, 'import matplotlib.pyplot as plt\n'), ((16102, 16117), 'matplotlib.pyplot.cm.tab20', 'plt.cm.tab20', (['(1)'], {}), '(1)\n', (16114, 16117), True, 'import matplotlib.pyplot as plt\n')] |
import cv2 as cv
import math
import numpy as np
import os
from src import variables
from src.depth_parser import DepthParser
from src.disparity_calculator import DisparityCalculator
from src.image_matcher import ImageMatcher
from src.point_cloud_builder import PointCloudBuilder
from src.point_cloud_merger import PointCloudMerger
def __is_vertex_valid__(vertex):
return math.inf not in vertex and (-math.inf) not in vertex and math.nan not in vertex
def __save_point_cloud_to_file__(vertices, colors, file_path):
    """Write a colored point cloud to *file_path* in ASCII PLY format.

    Vertices and colors are flattened to (N, 3), joined into (N, 6) rows
    of ``x y z r g b``, filtered through ``__is_vertex_valid__``, and --
    only if any rows survive -- written below the header template taken
    from ``variables.ply_header``.
    """
    points = np.hstack([vertices.reshape(-1, 3), colors.reshape(-1, 3)])
    rows = [row for row in points if __is_vertex_valid__(row)]
    if not rows:
        # Nothing valid to write; do not create an empty file.
        return
    header = variables.ply_header % dict(vert_num=len(rows))
    with open(file_path, "wb") as out:
        out.write(header.encode("utf-8"))
        np.savetxt(out, rows, fmt="%f %f %f %d %d %d ")
# Pipeline objects from the project's src package.
disparity_calculator = DisparityCalculator()
depth_parser = DepthParser()
cloud_builder = PointCloudBuilder()
cloud_merger = PointCloudMerger()

# Per-image records: {'disp': disparity map, 'point_cloud': {'points', 'colors'}}.
data = []

# Stage 1: build a disparity map and a point cloud for every stereo pair.
# Left/right captures are matched by identical file names in the two folders.
file_names = os.listdir(variables.left_captures_path)
for file_name in file_names:
    print(file_name)
    left_path = "{}/{}".format(variables.left_captures_path, file_name)
    right_path = "{}/{}".format(variables.right_captures_path, file_name)
    # Derived output locations (disparity PNG, PLY cloud, CSV depth map);
    # only the disparity path is exercised by the active code below.
    file_name_without_extension = file_name.split(".")[0]
    disp_file_name = "disp_{}.png".format(file_name_without_extension)
    disp_path = "{}/{}".format(variables.disparity_path, disp_file_name)
    ply_file_name = file_name_without_extension + ".ply"
    ply_path = "{}/{}".format(variables.point_clouds_path, ply_file_name)
    depth_map_file_name = file_name_without_extension + ".csv"
    depth_map_path = "{}/{}".format(variables.left_depth_maps_path, depth_map_file_name)
    img_left = cv.imread(left_path)
    img_right = cv.imread(right_path)
    disparity_map = disparity_calculator.get_disparity_map(img_left, img_right)
    # depth_map = depth_parser.get_depth_map_from_file(depth_map_path)
    points, colors = cloud_builder.build_point_cloud_by_disparity(img_left, disparity_map)
    # points, colors = cloud_builder.build_point_cloud_by_depth(img_left, depth_map)
    # __save_point_cloud_to_file__(points, colors, ply_path)
    data.append({
        'disp': disparity_map,
        'point_cloud': {
            'points': points,
            'colors': colors
        }
    })
# angle = 0

# Stage 2: extract ORB features from every disparity map.
image_matcher = ImageMatcher()
images_data = []
for x in data:
    images_data.append(image_matcher.features.orb(x['disp']))

# Stage 3: match consecutive disparity maps and merge their point clouds
# into one accumulated cloud, concatenated along axis 1 (image width).
# 720 rows -- presumably the capture height; TODO confirm against camera config.
points = np.empty(shape=(720, 0, 3))
colors = np.empty(shape=points.shape)
shift = 0
for i in range(0, len(images_data) - 1):
    matches = image_matcher.matching.bruteforce(images_data[i], images_data[i + 1])
    if len(matches) == 0:
        # No correspondences between the two frames -- nothing to merge.
        continue
    cloud = cloud_merger.merge_clouds(data[i]['point_cloud'], data[i+1]['point_cloud'], matches)
    # Offset the merged cloud's y coordinate by the accumulated shift.
    # NOTE(review): `shift` is initialised to 0 and never updated, so this
    # += is currently a no-op -- confirm whether an increment per merged
    # cloud is missing here.
    for k in range(0, cloud['points'].shape[0]):
        for j in range(0, cloud['points'].shape[1]):
            cloud['points'][k][j][1] += shift
    points = np.concatenate([points, cloud['points']], axis=1)
    colors = np.concatenate([colors, cloud['colors']], axis=1)
    # __save_point_cloud_to_file__(cloud['points'], cloud['colors'], "out_{}.ply".format(i))
# Persist the full accumulated cloud.
__save_point_cloud_to_file__(points, colors, "out.ply")
# TODO:
# points_1 = data[i]['point_cloud']['points'][:, :left_crop_max]
# colors_1 = data[i]['point_cloud']['colors'][:, :left_crop_max]
# __save_point_cloud_to_file__(points_1, colors_1, "out_{}_z.ply".format(i))
# __save_point_cloud_to_file__(points_1, colors_1, "out_{}_1.ply".format(i))
# points_2 = data[i+1]['point_cloud']['points'][:, right_crop_min:]
# colors_2 = data[i+1]['point_cloud']['colors'][:, right_crop_min:]
# __save_point_cloud_to_file__(points_2, colors_2, "out_{}_2.ply".format(i))
#
# for k in range(0, points_1.shape[0]):
# for j in range(0, points_1.shape[1]):
# y = points_1[k][j][1]
# z = points_1[k][j][2]
# points_1[k][j][1] = y * math.cos(angle) - z * math.sin(angle)
# points_1[k][j][2] = y * math.sin(angle) + z * math.cos(angle)
#
# angle += (left_crop_max / 1280) * (-30 * math.pi / 180)
#
# points = np.concatenate([points, points_1], axis=1)
# colors = np.concatenate([colors, colors_1], axis=1)
| [
"os.listdir",
"numpy.hstack",
"src.image_matcher.ImageMatcher",
"src.point_cloud_builder.PointCloudBuilder",
"src.point_cloud_merger.PointCloudMerger",
"src.disparity_calculator.DisparityCalculator",
"src.depth_parser.DepthParser",
"numpy.empty",
"numpy.concatenate",
"numpy.savetxt",
"cv2.imread... | [((953, 974), 'src.disparity_calculator.DisparityCalculator', 'DisparityCalculator', ([], {}), '()\n', (972, 974), False, 'from src.disparity_calculator import DisparityCalculator\n'), ((990, 1003), 'src.depth_parser.DepthParser', 'DepthParser', ([], {}), '()\n', (1001, 1003), False, 'from src.depth_parser import DepthParser\n'), ((1020, 1039), 'src.point_cloud_builder.PointCloudBuilder', 'PointCloudBuilder', ([], {}), '()\n', (1037, 1039), False, 'from src.point_cloud_builder import PointCloudBuilder\n'), ((1055, 1073), 'src.point_cloud_merger.PointCloudMerger', 'PointCloudMerger', ([], {}), '()\n', (1071, 1073), False, 'from src.point_cloud_merger import PointCloudMerger\n'), ((1098, 1138), 'os.listdir', 'os.listdir', (['variables.left_captures_path'], {}), '(variables.left_captures_path)\n', (1108, 1138), False, 'import os\n'), ((2470, 2484), 'src.image_matcher.ImageMatcher', 'ImageMatcher', ([], {}), '()\n', (2482, 2484), False, 'from src.image_matcher import ImageMatcher\n'), ((2590, 2617), 'numpy.empty', 'np.empty', ([], {'shape': '(720, 0, 3)'}), '(shape=(720, 0, 3))\n', (2598, 2617), True, 'import numpy as np\n'), ((2627, 2655), 'numpy.empty', 'np.empty', ([], {'shape': 'points.shape'}), '(shape=points.shape)\n', (2635, 2655), True, 'import numpy as np\n'), ((613, 642), 'numpy.hstack', 'np.hstack', (['[vertices, colors]'], {}), '([vertices, colors])\n', (622, 642), True, 'import numpy as np\n'), ((1840, 1860), 'cv2.imread', 'cv.imread', (['left_path'], {}), '(left_path)\n', (1849, 1860), True, 'import cv2 as cv\n'), ((1877, 1898), 'cv2.imread', 'cv.imread', (['right_path'], {}), '(right_path)\n', (1886, 1898), True, 'import cv2 as cv\n'), ((3096, 3145), 'numpy.concatenate', 'np.concatenate', (["[points, cloud['points']]"], {'axis': '(1)'}), "([points, cloud['points']], axis=1)\n", (3110, 3145), True, 'import numpy as np\n'), ((3159, 3208), 'numpy.concatenate', 'np.concatenate', (["[colors, cloud['colors']]"], {'axis': '(1)'}), "([colors, 
cloud['colors']], axis=1)\n", (3173, 3208), True, 'import numpy as np\n'), ((878, 927), 'numpy.savetxt', 'np.savetxt', (['f', 'vertices'], {'fmt': '"""%f %f %f %d %d %d """'}), "(f, vertices, fmt='%f %f %f %d %d %d ')\n", (888, 927), True, 'import numpy as np\n')] |
from copy import deepcopy
from typing import List
import numpy as np
import pandas as pd
import pytest
from xarray import DataArray, Dataset, Variable, concat
from xarray.core import dtypes, merge
from . import (
InaccessibleArray,
assert_array_equal,
assert_equal,
assert_identical,
requires_dask,
)
from .test_dataset import create_test_data
def test_concat_compat() -> None:
    """concat with compat="broadcast_equals" keeps variables lacking the
    concat dimension out of the concat axis, and raises for dimensions
    that are not shared by every dataset."""
    ds1 = Dataset(
        data_vars={
            "has_x_y": (("y", "x"), [[1, 2]]),
            "has_x": ("x", [1, 2]),
            "no_x_y": ("z", [1, 2]),
        },
        coords={"x": [0, 1], "y": [0], "z": [-1, -2]},
    )
    ds2 = Dataset(
        data_vars={
            "has_x_y": (("y", "x"), [[3, 4]]),
            "has_x": ("x", [1, 2]),
            "no_x_y": (("q", "z"), [[1, 2]]),
        },
        coords={"x": [0, 1], "y": [1], "z": [-1, -2], "q": [0]},
    )

    actual = concat([ds1, ds2], dim="y", data_vars="minimal", compat="broadcast_equals")
    assert_equal(ds2.no_x_y, actual.no_x_y.transpose())

    # Variables without the concat dimension must not acquire it.
    for name in ("has_x", "no_x_y"):
        assert "y" not in actual[name].dims
        assert "y" not in actual[name].coords

    with pytest.raises(
        ValueError, match=r"coordinates in some datasets but not others"
    ):
        concat([ds1, ds2], dim="q")
    with pytest.raises(ValueError, match=r"'q' is not present in all datasets"):
        concat([ds2, ds1], dim="q")
class TestConcatDataset:
    """Tests for :py:func:`xarray.concat` applied to Dataset objects."""

    @pytest.fixture
    def data(self) -> Dataset:
        # Standard test dataset without the "dim3" dimension.
        return create_test_data().drop_dims("dim3")

    def rectify_dim_order(self, data, dataset) -> Dataset:
        # return a new dataset with all variable dimensions transposed into
        # the order in which they are found in `data`
        return Dataset(
            {k: v.transpose(*data[k].dims) for k, v in dataset.data_vars.items()},
            dataset.coords,
            attrs=dataset.attrs,
        )

    @pytest.mark.parametrize("coords", ["different", "minimal"])
    @pytest.mark.parametrize("dim", ["dim1", "dim2"])
    def test_concat_simple(self, data, dim, coords) -> None:
        """Splitting along a dimension and concatenating restores the original."""
        datasets = [g for _, g in data.groupby(dim, squeeze=False)]
        assert_identical(data, concat(datasets, dim, coords=coords))

    def test_concat_merge_variables_present_in_some_datasets(self, data) -> None:
        """Variables present in only some inputs are merged into the result."""
        # coordinates present in some datasets but not others
        ds1 = Dataset(data_vars={"a": ("y", [0.1])}, coords={"x": 0.1})
        ds2 = Dataset(data_vars={"a": ("y", [0.2])}, coords={"z": 0.2})
        actual = concat([ds1, ds2], dim="y", coords="minimal")
        expected = Dataset({"a": ("y", [0.1, 0.2])}, coords={"x": 0.1, "z": 0.2})
        assert_identical(expected, actual)

        # data variables present in some datasets but not others
        split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]
        data0, data1 = deepcopy(split_data)
        data1["foo"] = ("bar", np.random.randn(10))
        actual = concat([data0, data1], "dim1")
        expected = data.copy().assign(foo=data1.foo)
        assert_identical(expected, actual)

    def test_concat_2(self, data) -> None:
        """Concat over an explicitly listed set of coordinates round-trips."""
        dim = "dim2"
        datasets = [g for _, g in data.groupby(dim, squeeze=True)]
        concat_over = [k for k, v in data.coords.items() if dim in v.dims and k != dim]
        actual = concat(datasets, data[dim], coords=concat_over)
        assert_identical(data, self.rectify_dim_order(data, actual))

    @pytest.mark.parametrize("coords", ["different", "minimal", "all"])
    @pytest.mark.parametrize("dim", ["dim1", "dim2"])
    def test_concat_coords_kwarg(self, data, dim, coords) -> None:
        """The coords kwarg controls whether off-dim coords are concatenated."""
        data = data.copy(deep=True)
        # make sure the coords argument behaves as expected
        data.coords["extra"] = ("dim4", np.arange(3))
        datasets = [g for _, g in data.groupby(dim, squeeze=True)]

        actual = concat(datasets, data[dim], coords=coords)
        if coords == "all":
            expected = np.array([data["extra"].values for _ in range(data.dims[dim])])
            assert_array_equal(actual["extra"].values, expected)

        else:
            assert_equal(data["extra"], actual["extra"])

    def test_concat(self, data) -> None:
        """Concat of dataset slices (including a scalar slice) round-trips."""
        split_data = [
            data.isel(dim1=slice(3)),
            data.isel(dim1=3),
            data.isel(dim1=slice(4, None)),
        ]
        assert_identical(data, concat(split_data, "dim1"))

    def test_concat_dim_precedence(self, data) -> None:
        # verify that the dim argument takes precedence over
        # concatenating dataset variables of the same name
        dim = (2 * data["dim1"]).rename("dim1")
        datasets = [g for _, g in data.groupby("dim1", squeeze=False)]
        expected = data.copy()
        expected["dim1"] = dim
        assert_identical(expected, concat(datasets, dim))

    def test_concat_data_vars_typing(self) -> None:
        # Testing typing, can be removed if the next function works with annotations.
        data = Dataset({"foo": ("x", np.random.randn(10))})
        objs: List[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
        actual = concat(objs, dim="x", data_vars="minimal")
        assert_identical(data, actual)

    def test_concat_data_vars(self):
        """Every data_vars mode reproduces the original on a clean split."""
        # TODO: annotating this func fails
        data = Dataset({"foo": ("x", np.random.randn(10))})
        objs: List[Dataset] = [data.isel(x=slice(5)), data.isel(x=slice(5, None))]
        for data_vars in ["minimal", "different", "all", [], ["foo"]]:
            actual = concat(objs, dim="x", data_vars=data_vars)
            assert_identical(data, actual)

    def test_concat_coords(self):
        """Conflicting scalar coords are concatenated or raise by coords mode."""
        # TODO: annotating this func fails
        data = Dataset({"foo": ("x", np.random.randn(10))})
        expected = data.assign_coords(c=("x", [0] * 5 + [1] * 5))
        objs = [
            data.isel(x=slice(5)).assign_coords(c=0),
            data.isel(x=slice(5, None)).assign_coords(c=1),
        ]
        for coords in ["different", "all", ["c"]]:
            actual = concat(objs, dim="x", coords=coords)
            assert_identical(expected, actual)
        for coords in ["minimal", []]:
            with pytest.raises(merge.MergeError, match="conflicting values"):
                concat(objs, dim="x", coords=coords)

    def test_concat_constant_index(self):
        """Scalar data variables are stacked along the new dimension."""
        # TODO: annotating this func fails
        # GH425
        ds1 = Dataset({"foo": 1.5}, {"y": 1})
        ds2 = Dataset({"foo": 2.5}, {"y": 1})
        expected = Dataset({"foo": ("y", [1.5, 2.5]), "y": [1, 1]})
        for mode in ["different", "all", ["foo"]]:
            actual = concat([ds1, ds2], "y", data_vars=mode)
            assert_identical(expected, actual)
        with pytest.raises(merge.MergeError, match="conflicting values"):
            # previously dim="y", and raised error which makes no sense.
            # "foo" has dimension "y" so minimal should concatenate it?
            concat([ds1, ds2], "new_dim", data_vars="minimal")

    def test_concat_size0(self) -> None:
        """Concatenating with a zero-length slice is a no-op, in either order."""
        data = create_test_data()
        split_data = [data.isel(dim1=slice(0, 0)), data]
        actual = concat(split_data, "dim1")
        assert_identical(data, actual)

        actual = concat(split_data[::-1], "dim1")
        assert_identical(data, actual)

    def test_concat_autoalign(self) -> None:
        """Datasets with different index labels are aligned (NaN-filled)."""
        ds1 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 2])])})
        ds2 = Dataset({"foo": DataArray([1, 2], coords=[("x", [1, 3])])})
        actual = concat([ds1, ds2], "y")
        expected = Dataset(
            {
                "foo": DataArray(
                    [[1, 2, np.nan], [1, np.nan, 2]],
                    dims=["y", "x"],
                    coords={"x": [1, 2, 3]},
                )
            }
        )
        assert_identical(expected, actual)

    def test_concat_errors(self):
        """Invalid arguments and inconsistent inputs raise ValueError."""
        # TODO: annotating this func fails
        data = create_test_data()
        split_data = [data.isel(dim1=slice(3)), data.isel(dim1=slice(3, None))]

        with pytest.raises(ValueError, match=r"must supply at least one"):
            concat([], "dim1")

        with pytest.raises(ValueError, match=r"Cannot specify both .*='different'"):
            concat(
                [data, data], dim="concat_dim", data_vars="different", compat="override"
            )

        with pytest.raises(ValueError, match=r"must supply at least one"):
            concat([], "dim1")

        with pytest.raises(ValueError, match=r"are not coordinates"):
            concat([data, data], "new_dim", coords=["not_found"])

        with pytest.raises(ValueError, match=r"global attributes not"):
            # compat="identical" rejects differing attrs ...
            data0, data1 = deepcopy(split_data)
            data1.attrs["foo"] = "bar"
            concat([data0, data1], "dim1", compat="identical")
        # ... while compat="equals" ignores them.
        assert_identical(data, concat([data0, data1], "dim1", compat="equals"))

        with pytest.raises(ValueError, match=r"compat.* invalid"):
            concat(split_data, "dim1", compat="foobar")

        with pytest.raises(ValueError, match=r"unexpected value for"):
            concat([data, data], "new_dim", coords="foobar")

        with pytest.raises(
            ValueError, match=r"coordinate in some datasets but not others"
        ):
            concat([Dataset({"x": 0}), Dataset({"x": [1]})], dim="z")

        with pytest.raises(
            ValueError, match=r"coordinate in some datasets but not others"
        ):
            concat([Dataset({"x": 0}), Dataset({}, {"x": 1})], dim="z")

    def test_concat_join_kwarg(self) -> None:
        """Each join mode aligns non-concat dims as documented; "exact" raises."""
        ds1 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]})
        ds2 = Dataset({"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]})

        expected = {}
        expected["outer"] = Dataset(
            {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
            {"x": [0, 1], "y": [0, 0.0001]},
        )
        expected["inner"] = Dataset(
            {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
        )
        expected["left"] = Dataset(
            {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0]},
        )
        expected["right"] = Dataset(
            {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0.0001]},
        )
        expected["override"] = Dataset(
            {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0]},
        )

        with pytest.raises(ValueError, match=r"indexes along dimension 'y'"):
            actual = concat([ds1, ds2], join="exact", dim="x")

        for join in expected:
            actual = concat([ds1, ds2], join=join, dim="x")
            assert_equal(actual, expected[join])

        # regression test for #3681
        actual = concat(
            [ds1.drop_vars("x"), ds2.drop_vars("x")], join="override", dim="y"
        )
        expected2 = Dataset(
            {"a": (("x", "y"), np.array([0, 0], ndmin=2))}, coords={"y": [0, 0.0001]}
        )
        assert_identical(actual, expected2)

    @pytest.mark.parametrize(
        "combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception",
        [
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 1, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                False,
            ),
            ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
            ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 4, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                True,
            ),
            ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
            (
                "override",
                {"a": 1, "b": 2},
                {"a": 4, "b": 5, "c": 3},
                {"a": 1, "b": 2},
                False,
            ),
            (
                "drop_conflicts",
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": 41, "c": 43, "d": 44},
                False,
            ),
            (
                lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {},
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": -1, "b": 0, "c": 1},
                False,
            ),
        ],
    )
    def test_concat_combine_attrs_kwarg(
        self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception
    ):
        """combine_attrs controls how dataset-level attrs are merged."""
        ds1 = Dataset({"a": ("x", [0])}, coords={"x": [0]}, attrs=var1_attrs)
        ds2 = Dataset({"a": ("x", [0])}, coords={"x": [1]}, attrs=var2_attrs)

        if expect_exception:
            with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"):
                concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
        else:
            actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
            expected = Dataset(
                {"a": ("x", [0, 0])}, {"x": [0, 1]}, attrs=expected_attrs
            )

            assert_identical(actual, expected)

    @pytest.mark.parametrize(
        "combine_attrs, attrs1, attrs2, expected_attrs, expect_exception",
        [
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 1, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                False,
            ),
            ("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
            ("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
            (
                "no_conflicts",
                {"a": 1, "b": 2},
                {"a": 4, "c": 3},
                {"a": 1, "b": 2, "c": 3},
                True,
            ),
            ("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
            ("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
            (
                "override",
                {"a": 1, "b": 2},
                {"a": 4, "b": 5, "c": 3},
                {"a": 1, "b": 2},
                False,
            ),
            (
                "drop_conflicts",
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": 41, "c": 43, "d": 44},
                False,
            ),
            (
                lambda attrs, context: {"a": -1, "b": 0, "c": 1} if any(attrs) else {},
                {"a": 41, "b": 42, "c": 43},
                {"b": 2, "c": 43, "d": 44},
                {"a": -1, "b": 0, "c": 1},
                False,
            ),
        ],
    )
    def test_concat_combine_attrs_kwarg_variables(
        self, combine_attrs, attrs1, attrs2, expected_attrs, expect_exception
    ):
        """check that combine_attrs is used on data variables and coords"""
        ds1 = Dataset({"a": ("x", [0], attrs1)}, coords={"x": ("x", [0], attrs1)})
        ds2 = Dataset({"a": ("x", [0], attrs2)}, coords={"x": ("x", [1], attrs2)})

        if expect_exception:
            with pytest.raises(ValueError, match=f"combine_attrs='{combine_attrs}'"):
                concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
        else:
            actual = concat([ds1, ds2], dim="x", combine_attrs=combine_attrs)
            expected = Dataset(
                {"a": ("x", [0, 0], expected_attrs)},
                {"x": ("x", [0, 1], expected_attrs)},
            )

            assert_identical(actual, expected)

    def test_concat_promote_shape(self) -> None:
        """Scalar and mixed-dim variables are broadcast to the concat result."""
        # mixed dims within variables
        objs = [Dataset({}, {"x": 0}), Dataset({"x": [1]})]
        actual = concat(objs, "x")
        expected = Dataset({"x": [0, 1]})
        assert_identical(actual, expected)

        objs = [Dataset({"x": [0]}), Dataset({}, {"x": 1})]
        actual = concat(objs, "x")
        assert_identical(actual, expected)

        # mixed dims between variables
        objs = [Dataset({"x": [2], "y": 3}), Dataset({"x": [4], "y": 5})]
        actual = concat(objs, "x")
        expected = Dataset({"x": [2, 4], "y": ("x", [3, 5])})
        assert_identical(actual, expected)

        # mixed dims in coord variable
        objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1]}, {"y": ("x", [-2])})]
        actual = concat(objs, "x")
        expected = Dataset({"x": [0, 1]}, {"y": ("x", [-1, -2])})
        assert_identical(actual, expected)

        # scalars with mixed lengths along concat dim -- values should repeat
        objs = [Dataset({"x": [0]}, {"y": -1}), Dataset({"x": [1, 2]}, {"y": -2})]
        actual = concat(objs, "x")
        expected = Dataset({"x": [0, 1, 2]}, {"y": ("x", [-1, -2, -2])})
        assert_identical(actual, expected)

        # broadcast 1d x 1d -> 2d
        objs = [
            Dataset({"z": ("x", [-1])}, {"x": [0], "y": [0]}),
            Dataset({"z": ("y", [1])}, {"x": [1], "y": [0]}),
        ]
        actual = concat(objs, "x")
        expected = Dataset({"z": (("x", "y"), [[-1], [1]])}, {"x": [0, 1], "y": [0]})
        assert_identical(actual, expected)

    def test_concat_do_not_promote(self) -> None:
        """Conflicting non-concat coords must raise rather than broadcast."""
        # GH438
        objs = [
            Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
            Dataset({"y": ("t", [2])}, {"x": 1, "t": [0]}),
        ]
        expected = Dataset({"y": ("t", [1, 2])}, {"x": 1, "t": [0, 0]})
        actual = concat(objs, "t")
        assert_identical(expected, actual)

        objs = [
            Dataset({"y": ("t", [1])}, {"x": 1, "t": [0]}),
            Dataset({"y": ("t", [2])}, {"x": 2, "t": [0]}),
        ]
        with pytest.raises(ValueError):
            concat(objs, "t", coords="minimal")

    def test_concat_dim_is_variable(self) -> None:
        """A Variable passed as dim becomes the new coordinate."""
        objs = [Dataset({"x": 0}), Dataset({"x": 1})]
        coord = Variable("y", [3, 4])
        expected = Dataset({"x": ("y", [0, 1]), "y": [3, 4]})
        actual = concat(objs, coord)
        assert_identical(actual, expected)

    def test_concat_multiindex(self) -> None:
        """Concat along a MultiIndex dimension preserves the MultiIndex."""
        x = pd.MultiIndex.from_product([[1, 2, 3], ["a", "b"]])
        expected = Dataset({"x": x})
        actual = concat(
            [expected.isel(x=slice(2)), expected.isel(x=slice(2, None))], "x"
        )
        assert expected.equals(actual)
        assert isinstance(actual.x.to_index(), pd.MultiIndex)

    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}])
    def test_concat_fill_value(self, fill_value) -> None:
        """fill_value (scalar or per-variable dict) fills unaligned positions."""
        datasets = [
            Dataset({"a": ("x", [2, 3]), "b": ("x", [-2, 1]), "x": [1, 2]}),
            Dataset({"a": ("x", [1, 2]), "b": ("x", [3, -1]), "x": [0, 1]}),
        ]
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value_a = fill_value_b = np.nan
        elif isinstance(fill_value, dict):
            fill_value_a = fill_value["a"]
            fill_value_b = fill_value["b"]
        else:
            fill_value_a = fill_value_b = fill_value

        expected = Dataset(
            {
                "a": (("t", "x"), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]),
                "b": (("t", "x"), [[fill_value_b, -2, 1], [3, -1, fill_value_b]]),
            },
            {"x": [0, 1, 2]},
        )
        actual = concat(datasets, dim="t", fill_value=fill_value)
        assert_identical(actual, expected)

    @pytest.mark.parametrize("dtype", [str, bytes])
    @pytest.mark.parametrize("dim", ["x1", "x2"])
    def test_concat_str_dtype(self, dtype, dim) -> None:
        """String/bytes coordinate dtype survives concat along either dim."""

        data = np.arange(4).reshape([2, 2])

        da1 = Dataset(
            {
                "data": (["x1", "x2"], data),
                "x1": [0, 1],
                "x2": np.array(["a", "b"], dtype=dtype),
            }
        )
        da2 = Dataset(
            {
                "data": (["x1", "x2"], data),
                "x1": np.array([1, 2]),
                "x2": np.array(["c", "d"], dtype=dtype),
            }
        )
        actual = concat([da1, da2], dim=dim)

        assert np.issubdtype(actual.x2.dtype, dtype)
class TestConcatDataArray:
    """Tests for ``concat`` applied to DataArray inputs."""
    def test_concat(self) -> None:
        """Concatenate DataArrays along a new dimension, from a groupby
        iteration, and with a pandas Index as ``dim``; invalid ``compat``
        and ``data_vars`` arguments raise ValueError."""
        ds = Dataset(
            {
                "foo": (["x", "y"], np.random.random((2, 3))),
                "bar": (["x", "y"], np.random.random((2, 3))),
            },
            {"x": [0, 1]},
        )
        foo = ds["foo"]
        bar = ds["bar"]
        # from dataset array:
        expected = DataArray(
            np.array([foo.values, bar.values]),
            dims=["w", "x", "y"],
            coords={"x": [0, 1]},
        )
        actual = concat([foo, bar], "w")
        assert_equal(expected, actual)
        # from iteration:
        grouped = [g for _, g in foo.groupby("x")]
        stacked = concat(grouped, ds["x"])
        assert_identical(foo, stacked)
        # with an index as the 'dim' argument
        stacked = concat(grouped, pd.Index(ds["x"], name="x"))
        assert_identical(foo, stacked)
        # an unnamed Index yields the default "concat_dim" dimension name
        actual2 = concat([foo[0], foo[1]], pd.Index([0, 1])).reset_coords(drop=True)
        expected = foo[:2].rename({"x": "concat_dim"})
        assert_identical(expected, actual2)
        # a plain list as 'dim' behaves the same way
        actual3 = concat([foo[0], foo[1]], [0, 1]).reset_coords(drop=True)
        expected = foo[:2].rename({"x": "concat_dim"})
        assert_identical(expected, actual3)
        # 'foo' and 'bar' differ, so compat="identical" must fail
        with pytest.raises(ValueError, match=r"not identical"):
            concat([foo, bar], dim="w", compat="identical")
        # 'data_vars' is rejected for DataArray inputs
        with pytest.raises(ValueError, match=r"not a valid argument"):
            concat([foo, bar], dim="w", data_vars="minimal")
    def test_concat_encoding(self) -> None:
        """Encoding of the inputs survives concatenation."""
        # Regression test for GH1297
        ds = Dataset(
            {
                "foo": (["x", "y"], np.random.random((2, 3))),
                "bar": (["x", "y"], np.random.random((2, 3))),
            },
            {"x": [0, 1]},
        )
        foo = ds["foo"]
        foo.encoding = {"complevel": 5}
        ds.encoding = {"unlimited_dims": "x"}
        assert concat([foo, foo], dim="x").encoding == foo.encoding
        assert concat([ds, ds], dim="x").encoding == ds.encoding
    @requires_dask
    def test_concat_lazy(self) -> None:
        """Concatenating lazy dask-backed arrays must not compute them
        (InaccessibleArray would raise on data access)."""
        import dask.array as da
        arrays = [
            DataArray(
                da.from_array(InaccessibleArray(np.zeros((3, 3))), 3), dims=["x", "y"]
            )
            for _ in range(2)
        ]
        # should not raise
        combined = concat(arrays, dim="z")
        assert combined.shape == (2, 3, 3)
        assert combined.dims == ("z", "x", "y")
    @pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0])
    def test_concat_fill_value(self, fill_value) -> None:
        """Gaps from non-aligned coordinates are filled with ``fill_value``."""
        foo = DataArray([1, 2], coords=[("x", [1, 2])])
        bar = DataArray([1, 2], coords=[("x", [1, 3])])
        if fill_value == dtypes.NA:
            # if we supply the default, we expect the missing value for a
            # float array
            fill_value = np.nan
        expected = DataArray(
            [[1, 2, fill_value], [1, fill_value, 2]],
            dims=["y", "x"],
            coords={"x": [1, 2, 3]},
        )
        actual = concat((foo, bar), dim="y", fill_value=fill_value)
        assert_identical(actual, expected)
    def test_concat_join_kwarg(self) -> None:
        """Each ``join`` mode aligns the non-concat dimension differently;
        join="exact" raises on mismatched indexes."""
        ds1 = Dataset(
            {"a": (("x", "y"), [[0]])}, coords={"x": [0], "y": [0]}
        ).to_array()
        ds2 = Dataset(
            {"a": (("x", "y"), [[0]])}, coords={"x": [1], "y": [0.0001]}
        ).to_array()
        # expected result per join mode, keyed by the mode name
        expected = {}
        expected["outer"] = Dataset(
            {"a": (("x", "y"), [[0, np.nan], [np.nan, 0]])},
            {"x": [0, 1], "y": [0, 0.0001]},
        )
        expected["inner"] = Dataset(
            {"a": (("x", "y"), [[], []])}, {"x": [0, 1], "y": []}
        )
        expected["left"] = Dataset(
            {"a": (("x", "y"), np.array([0, np.nan], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0]},
        )
        expected["right"] = Dataset(
            {"a": (("x", "y"), np.array([np.nan, 0], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0.0001]},
        )
        expected["override"] = Dataset(
            {"a": (("x", "y"), np.array([0, 0], ndmin=2).T)},
            coords={"x": [0, 1], "y": [0]},
        )
        with pytest.raises(ValueError, match=r"indexes along dimension 'y'"):
            actual = concat([ds1, ds2], join="exact", dim="x")
        for join in expected:
            actual = concat([ds1, ds2], join=join, dim="x")
            assert_equal(actual, expected[join].to_array())
    def test_concat_combine_attrs_kwarg(self) -> None:
        """``combine_attrs`` controls how attrs merge; conflicting attrs
        raise under "identical" and "no_conflicts"."""
        da1 = DataArray([0], coords=[("x", [0])], attrs={"b": 42})
        da2 = DataArray([0], coords=[("x", [1])], attrs={"b": 42, "c": 43})
        # expected result per combine_attrs mode, keyed by the mode name
        expected = {}
        expected["drop"] = DataArray([0, 0], coords=[("x", [0, 1])])
        expected["no_conflicts"] = DataArray(
            [0, 0], coords=[("x", [0, 1])], attrs={"b": 42, "c": 43}
        )
        expected["override"] = DataArray(
            [0, 0], coords=[("x", [0, 1])], attrs={"b": 42}
        )
        with pytest.raises(ValueError, match=r"combine_attrs='identical'"):
            actual = concat([da1, da2], dim="x", combine_attrs="identical")
        with pytest.raises(ValueError, match=r"combine_attrs='no_conflicts'"):
            da3 = da2.copy(deep=True)
            da3.attrs["b"] = 44  # conflicting value for key 'b'
            actual = concat([da1, da3], dim="x", combine_attrs="no_conflicts")
        for combine_attrs in expected:
            actual = concat([da1, da2], dim="x", combine_attrs=combine_attrs)
            assert_identical(actual, expected[combine_attrs])
    @pytest.mark.parametrize("dtype", [str, bytes])
    @pytest.mark.parametrize("dim", ["x1", "x2"])
    def test_concat_str_dtype(self, dtype, dim) -> None:
        """String-typed coordinates keep a compatible dtype after concat."""
        data = np.arange(4).reshape([2, 2])
        da1 = DataArray(
            data=data,
            dims=["x1", "x2"],
            coords={"x1": [0, 1], "x2": np.array(["a", "b"], dtype=dtype)},
        )
        da2 = DataArray(
            data=data,
            dims=["x1", "x2"],
            coords={"x1": np.array([1, 2]), "x2": np.array(["c", "d"], dtype=dtype)},
        )
        actual = concat([da1, da2], dim=dim)
        assert np.issubdtype(actual.x2.dtype, dtype)
    def test_concat_coord_name(self) -> None:
        """The concat dimension takes its name from a named DataArray 'dim',
        falling back to the default "dim_0" for an unnamed one."""
        da = DataArray([0], dims="a")
        da_concat = concat([da, da], dim=DataArray([0, 1], dims="b"))
        assert list(da_concat.coords) == ["b"]
        da_concat_std = concat([da, da], dim=DataArray([0, 1]))
        assert list(da_concat_std.coords) == ["dim_0"]
@pytest.mark.parametrize("attr1", ({"a": {"meta": [10, 20, 30]}}, {"a": [1, 2, 3]}, {}))
@pytest.mark.parametrize("attr2", ({"a": [1, 2, 3]}, {}))
def test_concat_attrs_first_variable(attr1, attr2) -> None:
arrs = [
DataArray([[1], [2]], dims=["x", "y"], attrs=attr1),
DataArray([[3], [4]], dims=["x", "y"], attrs=attr2),
]
concat_attrs = concat(arrs, "y").attrs
assert concat_attrs == attr1
def test_concat_merge_single_non_dim_coord():
    """A scalar non-dimension coordinate present on only some inputs is
    merged onto the result when possible, and raises otherwise.

    Bug fix: the final loop iterated over ``coords`` values but never
    passed them to ``concat``, so the ``coords="all"`` case was never
    exercised; ``coords=coords`` is now forwarded and the match regex
    accepts both error-message variants seen in this module.
    """
    # TODO: annotating this func fails
    # 'y' is a scalar non-dimension coordinate carried only by da1
    da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
    da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
    expected = DataArray(range(1, 7), dims="x", coords={"x": range(1, 7), "y": 1})
    # with 'different'/'minimal', the single 'y' value is merged onto the result
    for coords in ["different", "minimal"]:
        actual = concat([da1, da2], "x", coords=coords)
        assert_identical(actual, expected)
    # coords='all' demands 'y' on every input, so it must fail
    with pytest.raises(ValueError, match=r"'y' is not present in all datasets."):
        concat([da1, da2], dim="x", coords="all")
    da1 = DataArray([1, 2, 3], dims="x", coords={"x": [1, 2, 3], "y": 1})
    da2 = DataArray([4, 5, 6], dims="x", coords={"x": [4, 5, 6]})
    da3 = DataArray([7, 8, 9], dims="x", coords={"x": [7, 8, 9], "y": 1})
    for coords in ["different", "all"]:
        with pytest.raises(ValueError, match=r"'y' (is )?not present in all datasets"):
            # previously `coords` was unused here, running the identical
            # default-coords call twice; forward the parametrized value
            concat([da1, da2, da3], dim="x", coords=coords)
def test_concat_preserve_coordinate_order() -> None:
    """Concatenating along ``time`` preserves the order of both the
    dimensions and the coordinates of the inputs."""
    x = np.arange(0, 5)
    y = np.arange(0, 10)
    time = np.arange(0, 4)
    data = np.zeros((4, 10, 5), dtype=bool)
    # split the data into two consecutive halves along 'time'
    halves = [
        Dataset(
            {"data": (["time", "y", "x"], data[start : start + 2])},
            coords={"time": time[start : start + 2], "y": y, "x": x},
        )
        for start in (0, 2)
    ]
    expected = Dataset(
        {"data": (["time", "y", "x"], data)},
        coords={"time": time, "y": y, "x": x},
    )
    actual = concat(halves, dim="time")
    # dimensions must come back in the same order and with the same sizes
    for dim_act, dim_exp in zip(actual.dims, expected.dims):
        assert dim_act == dim_exp
        assert actual.dims[dim_act] == expected.dims[dim_exp]
    # coordinates must come back in the same order with identical values
    for coord_act, coord_exp in zip(actual.coords, expected.coords):
        assert coord_act == coord_exp
        assert_identical(actual.coords[coord_act], expected.coords[coord_exp])
def test_concat_typing_check() -> None:
    """Mixing Dataset and DataArray inputs to ``concat`` raises TypeError."""
    ds = Dataset({"foo": 1}, {"bar": 2})
    da = Dataset({"foo": 3}, {"bar": 4}).to_array(dim="foo")
    message = "The elements in the input list need to be either all 'Dataset's or all 'DataArray's"
    # a non-homogeneous input list must be rejected regardless of ordering
    for mixed in ([ds, da], [da, ds]):
        with pytest.raises(TypeError, match=message):
            concat(mixed, dim="foo")
| [
"pandas.MultiIndex.from_product",
"xarray.Variable",
"numpy.random.random",
"xarray.Dataset",
"xarray.concat",
"pytest.mark.parametrize",
"numpy.zeros",
"numpy.issubdtype",
"pytest.raises",
"numpy.array",
"xarray.DataArray",
"copy.deepcopy",
"pandas.Index",
"numpy.random.randn",
"numpy.a... | [((27055, 27147), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attr1"""', "({'a': {'meta': [10, 20, 30]}}, {'a': [1, 2, 3]}, {})"], {}), "('attr1', ({'a': {'meta': [10, 20, 30]}}, {'a': [1, \n 2, 3]}, {}))\n", (27078, 27147), False, 'import pytest\n'), ((27144, 27200), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""attr2"""', "({'a': [1, 2, 3]}, {})"], {}), "('attr2', ({'a': [1, 2, 3]}, {}))\n", (27167, 27200), False, 'import pytest\n'), ((413, 557), 'xarray.Dataset', 'Dataset', (["{'has_x_y': (('y', 'x'), [[1, 2]]), 'has_x': ('x', [1, 2]), 'no_x_y': ('z',\n [1, 2])}"], {'coords': "{'x': [0, 1], 'y': [0], 'z': [-1, -2]}"}), "({'has_x_y': (('y', 'x'), [[1, 2]]), 'has_x': ('x', [1, 2]),\n 'no_x_y': ('z', [1, 2])}, coords={'x': [0, 1], 'y': [0], 'z': [-1, -2]})\n", (420, 557), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((634, 801), 'xarray.Dataset', 'Dataset', (["{'has_x_y': (('y', 'x'), [[3, 4]]), 'has_x': ('x', [1, 2]), 'no_x_y': (('q',\n 'z'), [[1, 2]])}"], {'coords': "{'x': [0, 1], 'y': [1], 'z': [-1, -2], 'q': [0]}"}), "({'has_x_y': (('y', 'x'), [[3, 4]]), 'has_x': ('x', [1, 2]),\n 'no_x_y': (('q', 'z'), [[1, 2]])}, coords={'x': [0, 1], 'y': [1], 'z':\n [-1, -2], 'q': [0]})\n", (641, 801), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((878, 953), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""y"""', 'data_vars': '"""minimal"""', 'compat': '"""broadcast_equals"""'}), "([ds1, ds2], dim='y', data_vars='minimal', compat='broadcast_equals')\n", (884, 953), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((1885, 1944), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""coords"""', "['different', 'minimal']"], {}), "('coords', ['different', 'minimal'])\n", (1908, 1944), False, 'import pytest\n'), ((1950, 1998), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', "['dim1', 'dim2']"], {}), "('dim', ['dim1', 'dim2'])\n", (1973, 
1998), False, 'import pytest\n'), ((3420, 3486), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""coords"""', "['different', 'minimal', 'all']"], {}), "('coords', ['different', 'minimal', 'all'])\n", (3443, 3486), False, 'import pytest\n'), ((3492, 3540), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', "['dim1', 'dim2']"], {}), "('dim', ['dim1', 'dim2'])\n", (3515, 3540), False, 'import pytest\n'), ((18719, 18795), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', "[dtypes.NA, 2, 2.0, {'a': 2, 'b': 1}]"], {}), "('fill_value', [dtypes.NA, 2, 2.0, {'a': 2, 'b': 1}])\n", (18742, 18795), False, 'import pytest\n'), ((19796, 19842), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[str, bytes]'], {}), "('dtype', [str, bytes])\n", (19819, 19842), False, 'import pytest\n'), ((19848, 19892), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', "['x1', 'x2']"], {}), "('dim', ['x1', 'x2'])\n", (19871, 19892), False, 'import pytest\n'), ((22993, 23051), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""fill_value"""', '[dtypes.NA, 2, 2.0]'], {}), "('fill_value', [dtypes.NA, 2, 2.0])\n", (23016, 23051), False, 'import pytest\n'), ((26090, 26136), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dtype"""', '[str, bytes]'], {}), "('dtype', [str, bytes])\n", (26113, 26136), False, 'import pytest\n'), ((26142, 26186), 'pytest.mark.parametrize', 'pytest.mark.parametrize', (['"""dim"""', "['x1', 'x2']"], {}), "('dim', ['x1', 'x2'])\n", (26165, 26186), False, 'import pytest\n'), ((27577, 27640), 'xarray.DataArray', 'DataArray', (['[1, 2, 3]'], {'dims': '"""x"""', 'coords': "{'x': [1, 2, 3], 'y': 1}"}), "([1, 2, 3], dims='x', coords={'x': [1, 2, 3], 'y': 1})\n", (27586, 27640), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((27651, 27706), 'xarray.DataArray', 'DataArray', (['[4, 5, 6]'], {'dims': '"""x"""', 'coords': "{'x': [4, 5, 6]}"}), "([4, 5, 
6], dims='x', coords={'x': [4, 5, 6]})\n", (27660, 27706), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28079, 28142), 'xarray.DataArray', 'DataArray', (['[1, 2, 3]'], {'dims': '"""x"""', 'coords': "{'x': [1, 2, 3], 'y': 1}"}), "([1, 2, 3], dims='x', coords={'x': [1, 2, 3], 'y': 1})\n", (28088, 28142), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28153, 28208), 'xarray.DataArray', 'DataArray', (['[4, 5, 6]'], {'dims': '"""x"""', 'coords': "{'x': [4, 5, 6]}"}), "([4, 5, 6], dims='x', coords={'x': [4, 5, 6]})\n", (28162, 28208), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28219, 28282), 'xarray.DataArray', 'DataArray', (['[7, 8, 9]'], {'dims': '"""x"""', 'coords': "{'x': [7, 8, 9], 'y': 1}"}), "([7, 8, 9], dims='x', coords={'x': [7, 8, 9], 'y': 1})\n", (28228, 28282), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28513, 28528), 'numpy.arange', 'np.arange', (['(0)', '(5)'], {}), '(0, 5)\n', (28522, 28528), True, 'import numpy as np\n'), ((28537, 28553), 'numpy.arange', 'np.arange', (['(0)', '(10)'], {}), '(0, 10)\n', (28546, 28553), True, 'import numpy as np\n'), ((28565, 28580), 'numpy.arange', 'np.arange', (['(0)', '(4)'], {}), '(0, 4)\n', (28574, 28580), True, 'import numpy as np\n'), ((28592, 28624), 'numpy.zeros', 'np.zeros', (['(4, 10, 5)'], {'dtype': 'bool'}), '((4, 10, 5), dtype=bool)\n', (28600, 28624), True, 'import numpy as np\n'), ((28636, 28735), 'xarray.Dataset', 'Dataset', (["{'data': (['time', 'y', 'x'], data[0:2])}"], {'coords': "{'time': time[0:2], 'y': y, 'x': x}"}), "({'data': (['time', 'y', 'x'], data[0:2])}, coords={'time': time[0:2\n ], 'y': y, 'x': x})\n", (28643, 28735), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28764, 28863), 'xarray.Dataset', 'Dataset', (["{'data': (['time', 'y', 'x'], data[2:4])}"], {'coords': "{'time': time[2:4], 'y': y, 'x': x}"}), "({'data': (['time', 'y', 'x'], data[2:4])}, coords={'time': 
time[2:4\n ], 'y': y, 'x': x})\n", (28771, 28863), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28898, 28986), 'xarray.Dataset', 'Dataset', (["{'data': (['time', 'y', 'x'], data)}"], {'coords': "{'time': time, 'y': y, 'x': x}"}), "({'data': (['time', 'y', 'x'], data)}, coords={'time': time, 'y': y,\n 'x': x})\n", (28905, 28986), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((29020, 29050), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""time"""'}), "([ds1, ds2], dim='time')\n", (29026, 29050), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((29444, 29475), 'xarray.Dataset', 'Dataset', (["{'foo': 1}", "{'bar': 2}"], {}), "({'foo': 1}, {'bar': 2})\n", (29451, 29475), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((1133, 1211), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""coordinates in some datasets but not others"""'}), "(ValueError, match='coordinates in some datasets but not others')\n", (1146, 1211), False, 'import pytest\n'), ((1236, 1263), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""q"""'}), "([ds1, ds2], dim='q')\n", (1242, 1263), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((1273, 1342), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'q\' is not present in all datasets"""'}), '(ValueError, match="\'q\' is not present in all datasets")\n', (1286, 1342), False, 'import pytest\n'), ((1353, 1380), 'xarray.concat', 'concat', (['[ds2, ds1]'], {'dim': '"""q"""'}), "([ds2, ds1], dim='q')\n", (1359, 1380), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2356, 2413), 'xarray.Dataset', 'Dataset', ([], {'data_vars': "{'a': ('y', [0.1])}", 'coords': "{'x': 0.1}"}), "(data_vars={'a': ('y', [0.1])}, coords={'x': 0.1})\n", (2363, 2413), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2428, 2485), 'xarray.Dataset', 'Dataset', ([], {'data_vars': "{'a': 
('y', [0.2])}", 'coords': "{'z': 0.2}"}), "(data_vars={'a': ('y', [0.2])}, coords={'z': 0.2})\n", (2435, 2485), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2503, 2548), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""y"""', 'coords': '"""minimal"""'}), "([ds1, ds2], dim='y', coords='minimal')\n", (2509, 2548), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2568, 2630), 'xarray.Dataset', 'Dataset', (["{'a': ('y', [0.1, 0.2])}"], {'coords': "{'x': 0.1, 'z': 0.2}"}), "({'a': ('y', [0.1, 0.2])}, coords={'x': 0.1, 'z': 0.2})\n", (2575, 2630), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2843, 2863), 'copy.deepcopy', 'deepcopy', (['split_data'], {}), '(split_data)\n', (2851, 2863), False, 'from copy import deepcopy\n'), ((2933, 2963), 'xarray.concat', 'concat', (['[data0, data1]', '"""dim1"""'], {}), "([data0, data1], 'dim1')\n", (2939, 2963), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((3297, 3344), 'xarray.concat', 'concat', (['datasets', 'data[dim]'], {'coords': 'concat_over'}), '(datasets, data[dim], coords=concat_over)\n', (3303, 3344), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((3843, 3885), 'xarray.concat', 'concat', (['datasets', 'data[dim]'], {'coords': 'coords'}), '(datasets, data[dim], coords=coords)\n', (3849, 3885), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((5100, 5142), 'xarray.concat', 'concat', (['objs'], {'dim': '"""x"""', 'data_vars': '"""minimal"""'}), "(objs, dim='x', data_vars='minimal')\n", (5106, 5142), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6371, 6402), 'xarray.Dataset', 'Dataset', (["{'foo': 1.5}", "{'y': 1}"], {}), "({'foo': 1.5}, {'y': 1})\n", (6378, 6402), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6417, 6448), 'xarray.Dataset', 'Dataset', (["{'foo': 2.5}", "{'y': 1}"], {}), "({'foo': 2.5}, {'y': 1})\n", (6424, 6448), 
False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6468, 6516), 'xarray.Dataset', 'Dataset', (["{'foo': ('y', [1.5, 2.5]), 'y': [1, 1]}"], {}), "({'foo': ('y', [1.5, 2.5]), 'y': [1, 1]})\n", (6475, 6516), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((7108, 7134), 'xarray.concat', 'concat', (['split_data', '"""dim1"""'], {}), "(split_data, 'dim1')\n", (7114, 7134), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((7192, 7224), 'xarray.concat', 'concat', (['split_data[::-1]', '"""dim1"""'], {}), "(split_data[::-1], 'dim1')\n", (7198, 7224), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((7475, 7498), 'xarray.concat', 'concat', (['[ds1, ds2]', '"""y"""'], {}), "([ds1, ds2], 'y')\n", (7481, 7498), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9543, 9607), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[0]])}"], {'coords': "{'x': [0], 'y': [0]}"}), "({'a': (('x', 'y'), [[0]])}, coords={'x': [0], 'y': [0]})\n", (9550, 9607), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9622, 9691), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[0]])}"], {'coords': "{'x': [1], 'y': [0.0001]}"}), "({'a': (('x', 'y'), [[0]])}, coords={'x': [1], 'y': [0.0001]})\n", (9629, 9691), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9743, 9836), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[0, np.nan], [np.nan, 0]])}", "{'x': [0, 1], 'y': [0, 0.0001]}"], {}), "({'a': (('x', 'y'), [[0, np.nan], [np.nan, 0]])}, {'x': [0, 1], 'y':\n [0, 0.0001]})\n", (9750, 9836), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9896, 9958), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[], []])}", "{'x': [0, 1], 'y': []}"], {}), "({'a': (('x', 'y'), [[], []])}, {'x': [0, 1], 'y': []})\n", (9903, 9958), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((12815, 12878), 'xarray.Dataset', 
'Dataset', (["{'a': ('x', [0])}"], {'coords': "{'x': [0]}", 'attrs': 'var1_attrs'}), "({'a': ('x', [0])}, coords={'x': [0]}, attrs=var1_attrs)\n", (12822, 12878), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((12893, 12956), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [0])}"], {'coords': "{'x': [1]}", 'attrs': 'var2_attrs'}), "({'a': ('x', [0])}, coords={'x': [1]}, attrs=var2_attrs)\n", (12900, 12956), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15232, 15300), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [0], attrs1)}"], {'coords': "{'x': ('x', [0], attrs1)}"}), "({'a': ('x', [0], attrs1)}, coords={'x': ('x', [0], attrs1)})\n", (15239, 15300), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15315, 15383), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [0], attrs2)}"], {'coords': "{'x': ('x', [1], attrs2)}"}), "({'a': ('x', [0], attrs2)}, coords={'x': ('x', [1], attrs2)})\n", (15322, 15383), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16032, 16049), 'xarray.concat', 'concat', (['objs', '"""x"""'], {}), "(objs, 'x')\n", (16038, 16049), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16069, 16091), 'xarray.Dataset', 'Dataset', (["{'x': [0, 1]}"], {}), "({'x': [0, 1]})\n", (16076, 16091), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16213, 16230), 'xarray.concat', 'concat', (['objs', '"""x"""'], {}), "(objs, 'x')\n", (16219, 16230), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16405, 16422), 'xarray.concat', 'concat', (['objs', '"""x"""'], {}), "(objs, 'x')\n", (16411, 16422), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16442, 16484), 'xarray.Dataset', 'Dataset', (["{'x': [2, 4], 'y': ('x', [3, 5])}"], {}), "({'x': [2, 4], 'y': ('x', [3, 5])})\n", (16449, 16484), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16674, 16691), 'xarray.concat', 'concat', 
(['objs', '"""x"""'], {}), "(objs, 'x')\n", (16680, 16691), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16711, 16757), 'xarray.Dataset', 'Dataset', (["{'x': [0, 1]}", "{'y': ('x', [-1, -2])}"], {}), "({'x': [0, 1]}, {'y': ('x', [-1, -2])})\n", (16718, 16757), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16980, 16997), 'xarray.concat', 'concat', (['objs', '"""x"""'], {}), "(objs, 'x')\n", (16986, 16997), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17017, 17070), 'xarray.Dataset', 'Dataset', (["{'x': [0, 1, 2]}", "{'y': ('x', [-1, -2, -2])}"], {}), "({'x': [0, 1, 2]}, {'y': ('x', [-1, -2, -2])})\n", (17024, 17070), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17318, 17335), 'xarray.concat', 'concat', (['objs', '"""x"""'], {}), "(objs, 'x')\n", (17324, 17335), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17355, 17421), 'xarray.Dataset', 'Dataset', (["{'z': (('x', 'y'), [[-1], [1]])}", "{'x': [0, 1], 'y': [0]}"], {}), "({'z': (('x', 'y'), [[-1], [1]])}, {'x': [0, 1], 'y': [0]})\n", (17362, 17421), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17698, 17750), 'xarray.Dataset', 'Dataset', (["{'y': ('t', [1, 2])}", "{'x': 1, 't': [0, 0]}"], {}), "({'y': ('t', [1, 2])}, {'x': 1, 't': [0, 0]})\n", (17705, 17750), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17768, 17785), 'xarray.concat', 'concat', (['objs', '"""t"""'], {}), "(objs, 't')\n", (17774, 17785), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18187, 18208), 'xarray.Variable', 'Variable', (['"""y"""', '[3, 4]'], {}), "('y', [3, 4])\n", (18195, 18208), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18228, 18270), 'xarray.Dataset', 'Dataset', (["{'x': ('y', [0, 1]), 'y': [3, 4]}"], {}), "({'x': ('y', [0, 1]), 'y': [3, 4]})\n", (18235, 18270), False, 'from xarray import DataArray, Dataset, 
Variable, concat\n'), ((18288, 18307), 'xarray.concat', 'concat', (['objs', 'coord'], {}), '(objs, coord)\n', (18294, 18307), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18410, 18461), 'pandas.MultiIndex.from_product', 'pd.MultiIndex.from_product', (["[[1, 2, 3], ['a', 'b']]"], {}), "([[1, 2, 3], ['a', 'b']])\n", (18436, 18461), True, 'import pandas as pd\n'), ((18481, 18498), 'xarray.Dataset', 'Dataset', (["{'x': x}"], {}), "({'x': x})\n", (18488, 18498), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((19439, 19607), 'xarray.Dataset', 'Dataset', (["{'a': (('t', 'x'), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]), 'b': ((\n 't', 'x'), [[fill_value_b, -2, 1], [3, -1, fill_value_b]])}", "{'x': [0, 1, 2]}"], {}), "({'a': (('t', 'x'), [[fill_value_a, 2, 3], [1, 2, fill_value_a]]),\n 'b': (('t', 'x'), [[fill_value_b, -2, 1], [3, -1, fill_value_b]])}, {\n 'x': [0, 1, 2]})\n", (19446, 19607), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((19698, 19746), 'xarray.concat', 'concat', (['datasets'], {'dim': '"""t"""', 'fill_value': 'fill_value'}), "(datasets, dim='t', fill_value=fill_value)\n", (19704, 19746), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((20411, 20438), 'xarray.concat', 'concat', (['[da1, da2]'], {'dim': 'dim'}), '([da1, da2], dim=dim)\n', (20417, 20438), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((20455, 20492), 'numpy.issubdtype', 'np.issubdtype', (['actual.x2.dtype', 'dtype'], {}), '(actual.x2.dtype, dtype)\n', (20468, 20492), True, 'import numpy as np\n'), ((21023, 21046), 'xarray.concat', 'concat', (['[foo, bar]', '"""w"""'], {}), "([foo, bar], 'w')\n", (21029, 21046), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((21181, 21205), 'xarray.concat', 'concat', (['grouped', "ds['x']"], {}), "(grouped, ds['x'])\n", (21187, 21205), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((22872, 
22895), 'xarray.concat', 'concat', (['arrays'], {'dim': '"""z"""'}), "(arrays, dim='z')\n", (22878, 22895), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23124, 23165), 'xarray.DataArray', 'DataArray', (['[1, 2]'], {'coords': "[('x', [1, 2])]"}), "([1, 2], coords=[('x', [1, 2])])\n", (23133, 23165), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23180, 23221), 'xarray.DataArray', 'DataArray', (['[1, 2]'], {'coords': "[('x', [1, 3])]"}), "([1, 2], coords=[('x', [1, 3])])\n", (23189, 23221), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23409, 23507), 'xarray.DataArray', 'DataArray', (['[[1, 2, fill_value], [1, fill_value, 2]]'], {'dims': "['y', 'x']", 'coords': "{'x': [1, 2, 3]}"}), "([[1, 2, fill_value], [1, fill_value, 2]], dims=['y', 'x'], coords\n ={'x': [1, 2, 3]})\n", (23418, 23507), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23567, 23617), 'xarray.concat', 'concat', (['(foo, bar)'], {'dim': '"""y"""', 'fill_value': 'fill_value'}), "((foo, bar), dim='y', fill_value=fill_value)\n", (23573, 23617), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23988, 24081), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[0, np.nan], [np.nan, 0]])}", "{'x': [0, 1], 'y': [0, 0.0001]}"], {}), "({'a': (('x', 'y'), [[0, np.nan], [np.nan, 0]])}, {'x': [0, 1], 'y':\n [0, 0.0001]})\n", (23995, 24081), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((24141, 24203), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[], []])}", "{'x': [0, 1], 'y': []}"], {}), "({'a': (('x', 'y'), [[], []])}, {'x': [0, 1], 'y': []})\n", (24148, 24203), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25065, 25117), 'xarray.DataArray', 'DataArray', (['[0]'], {'coords': "[('x', [0])]", 'attrs': "{'b': 42}"}), "([0], coords=[('x', [0])], attrs={'b': 42})\n", (25074, 25117), False, 'from xarray import DataArray, Dataset, Variable, 
concat\n'), ((25132, 25193), 'xarray.DataArray', 'DataArray', (['[0]'], {'coords': "[('x', [1])]", 'attrs': "{'b': 42, 'c': 43}"}), "([0], coords=[('x', [1])], attrs={'b': 42, 'c': 43})\n", (25141, 25193), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25244, 25285), 'xarray.DataArray', 'DataArray', (['[0, 0]'], {'coords': "[('x', [0, 1])]"}), "([0, 0], coords=[('x', [0, 1])])\n", (25253, 25285), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25321, 25388), 'xarray.DataArray', 'DataArray', (['[0, 0]'], {'coords': "[('x', [0, 1])]", 'attrs': "{'b': 42, 'c': 43}"}), "([0, 0], coords=[('x', [0, 1])], attrs={'b': 42, 'c': 43})\n", (25330, 25388), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25442, 25500), 'xarray.DataArray', 'DataArray', (['[0, 0]'], {'coords': "[('x', [0, 1])]", 'attrs': "{'b': 42}"}), "([0, 0], coords=[('x', [0, 1])], attrs={'b': 42})\n", (25451, 25500), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((26647, 26674), 'xarray.concat', 'concat', (['[da1, da2]'], {'dim': 'dim'}), '([da1, da2], dim=dim)\n', (26653, 26674), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((26691, 26728), 'numpy.issubdtype', 'np.issubdtype', (['actual.x2.dtype', 'dtype'], {}), '(actual.x2.dtype, dtype)\n', (26704, 26728), True, 'import numpy as np\n'), ((26790, 26814), 'xarray.DataArray', 'DataArray', (['[0]'], {'dims': '"""a"""'}), "([0], dims='a')\n", (26799, 26814), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((27283, 27334), 'xarray.DataArray', 'DataArray', (['[[1], [2]]'], {'dims': "['x', 'y']", 'attrs': 'attr1'}), "([[1], [2]], dims=['x', 'y'], attrs=attr1)\n", (27292, 27334), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((27344, 27395), 'xarray.DataArray', 'DataArray', (['[[3], [4]]'], {'dims': "['x', 'y']", 'attrs': 'attr2'}), "([[3], [4]], dims=['x', 'y'], attrs=attr2)\n", (27353, 27395), False, 'from 
xarray import DataArray, Dataset, Variable, concat\n'), ((27423, 27440), 'xarray.concat', 'concat', (['arrs', '"""y"""'], {}), "(arrs, 'y')\n", (27429, 27440), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((27853, 27891), 'xarray.concat', 'concat', (['[da1, da2]', '"""x"""'], {'coords': 'coords'}), "([da1, da2], 'x', coords=coords)\n", (27859, 27891), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((27945, 28015), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'y\' is not present in all datasets."""'}), '(ValueError, match="\'y\' is not present in all datasets.")\n', (27958, 28015), False, 'import pytest\n'), ((28026, 28067), 'xarray.concat', 'concat', (['[da1, da2]'], {'dim': '"""x"""', 'coords': '"""all"""'}), "([da1, da2], dim='x', coords='all')\n", (28032, 28067), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((29618, 29745), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""The elements in the input list need to be either all \'Dataset\'s or all \'DataArray\'s"""'}), '(TypeError, match=\n "The elements in the input list need to be either all \'Dataset\'s or all \'DataArray\'s"\n )\n', (29631, 29745), False, 'import pytest\n'), ((29768, 29795), 'xarray.concat', 'concat', (['[ds, da]'], {'dim': '"""foo"""'}), "([ds, da], dim='foo')\n", (29774, 29795), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((29805, 29932), 'pytest.raises', 'pytest.raises', (['TypeError'], {'match': '"""The elements in the input list need to be either all \'Dataset\'s or all \'DataArray\'s"""'}), '(TypeError, match=\n "The elements in the input list need to be either all \'Dataset\'s or all \'DataArray\'s"\n )\n', (29818, 29932), False, 'import pytest\n'), ((29955, 29982), 'xarray.concat', 'concat', (['[da, ds]'], {'dim': '"""foo"""'}), "([da, ds], dim='foo')\n", (29961, 29982), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2159, 2195), 
'xarray.concat', 'concat', (['datasets', 'dim'], {'coords': 'coords'}), '(datasets, dim, coords=coords)\n', (2165, 2195), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((2895, 2914), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (2910, 2914), True, 'import numpy as np\n'), ((3744, 3756), 'numpy.arange', 'np.arange', (['(3)'], {}), '(3)\n', (3753, 3756), True, 'import numpy as np\n'), ((4357, 4383), 'xarray.concat', 'concat', (['split_data', '"""dim1"""'], {}), "(split_data, 'dim1')\n", (4363, 4383), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((4778, 4799), 'xarray.concat', 'concat', (['datasets', 'dim'], {}), '(datasets, dim)\n', (4784, 4799), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((5498, 5540), 'xarray.concat', 'concat', (['objs'], {'dim': '"""x"""', 'data_vars': 'data_vars'}), "(objs, dim='x', data_vars=data_vars)\n", (5504, 5540), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6001, 6037), 'xarray.concat', 'concat', (['objs'], {'dim': '"""x"""', 'coords': 'coords'}), "(objs, dim='x', coords=coords)\n", (6007, 6037), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6589, 6628), 'xarray.concat', 'concat', (['[ds1, ds2]', '"""y"""'], {'data_vars': 'mode'}), "([ds1, ds2], 'y', data_vars=mode)\n", (6595, 6628), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6689, 6748), 'pytest.raises', 'pytest.raises', (['merge.MergeError'], {'match': '"""conflicting values"""'}), "(merge.MergeError, match='conflicting values')\n", (6702, 6748), False, 'import pytest\n'), ((6907, 6957), 'xarray.concat', 'concat', (['[ds1, ds2]', '"""new_dim"""'], {'data_vars': '"""minimal"""'}), "([ds1, ds2], 'new_dim', data_vars='minimal')\n", (6913, 6957), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8002, 8061), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""must supply at least 
one"""'}), "(ValueError, match='must supply at least one')\n", (8015, 8061), False, 'import pytest\n'), ((8076, 8094), 'xarray.concat', 'concat', (['[]', '"""dim1"""'], {}), "([], 'dim1')\n", (8082, 8094), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8109, 8178), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""Cannot specify both .*=\'different\'"""'}), '(ValueError, match="Cannot specify both .*=\'different\'")\n', (8122, 8178), False, 'import pytest\n'), ((8193, 8278), 'xarray.concat', 'concat', (['[data, data]'], {'dim': '"""concat_dim"""', 'data_vars': '"""different"""', 'compat': '"""override"""'}), "([data, data], dim='concat_dim', data_vars='different', compat='override'\n )\n", (8199, 8278), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8318, 8377), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""must supply at least one"""'}), "(ValueError, match='must supply at least one')\n", (8331, 8377), False, 'import pytest\n'), ((8392, 8410), 'xarray.concat', 'concat', (['[]', '"""dim1"""'], {}), "([], 'dim1')\n", (8398, 8410), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8425, 8479), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""are not coordinates"""'}), "(ValueError, match='are not coordinates')\n", (8438, 8479), False, 'import pytest\n'), ((8494, 8547), 'xarray.concat', 'concat', (['[data, data]', '"""new_dim"""'], {'coords': "['not_found']"}), "([data, data], 'new_dim', coords=['not_found'])\n", (8500, 8547), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8562, 8618), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""global attributes not"""'}), "(ValueError, match='global attributes not')\n", (8575, 8618), False, 'import pytest\n'), ((8648, 8668), 'copy.deepcopy', 'deepcopy', (['split_data'], {}), '(split_data)\n', (8656, 8668), False, 'from copy import deepcopy\n'), ((8720, 8770), 
'xarray.concat', 'concat', (['[data0, data1]', '"""dim1"""'], {'compat': '"""identical"""'}), "([data0, data1], 'dim1', compat='identical')\n", (8726, 8770), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8802, 8849), 'xarray.concat', 'concat', (['[data0, data1]', '"""dim1"""'], {'compat': '"""equals"""'}), "([data0, data1], 'dim1', compat='equals')\n", (8808, 8849), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8865, 8916), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""compat.* invalid"""'}), "(ValueError, match='compat.* invalid')\n", (8878, 8916), False, 'import pytest\n'), ((8931, 8974), 'xarray.concat', 'concat', (['split_data', '"""dim1"""'], {'compat': '"""foobar"""'}), "(split_data, 'dim1', compat='foobar')\n", (8937, 8974), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((8989, 9044), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""unexpected value for"""'}), "(ValueError, match='unexpected value for')\n", (9002, 9044), False, 'import pytest\n'), ((9059, 9107), 'xarray.concat', 'concat', (['[data, data]', '"""new_dim"""'], {'coords': '"""foobar"""'}), "([data, data], 'new_dim', coords='foobar')\n", (9065, 9107), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9122, 9199), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""coordinate in some datasets but not others"""'}), "(ValueError, match='coordinate in some datasets but not others')\n", (9135, 9199), False, 'import pytest\n'), ((9308, 9385), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""coordinate in some datasets but not others"""'}), "(ValueError, match='coordinate in some datasets but not others')\n", (9321, 9385), False, 'import pytest\n'), ((10471, 10533), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""indexes along dimension \'y\'"""'}), '(ValueError, match="indexes along dimension \'y\'")\n', (10484, 10533), False, 
'import pytest\n'), ((10557, 10598), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'join': '"""exact"""', 'dim': '"""x"""'}), "([ds1, ds2], join='exact', dim='x')\n", (10563, 10598), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((10651, 10689), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'join': 'join', 'dim': '"""x"""'}), "([ds1, ds2], join=join, dim='x')\n", (10657, 10689), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((13181, 13237), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""x"""', 'combine_attrs': 'combine_attrs'}), "([ds1, ds2], dim='x', combine_attrs=combine_attrs)\n", (13187, 13237), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((13261, 13327), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [0, 0])}", "{'x': [0, 1]}"], {'attrs': 'expected_attrs'}), "({'a': ('x', [0, 0])}, {'x': [0, 1]}, attrs=expected_attrs)\n", (13268, 13327), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15608, 15664), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""x"""', 'combine_attrs': 'combine_attrs'}), "([ds1, ds2], dim='x', combine_attrs=combine_attrs)\n", (15614, 15664), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15688, 15775), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [0, 0], expected_attrs)}", "{'x': ('x', [0, 1], expected_attrs)}"], {}), "({'a': ('x', [0, 0], expected_attrs)}, {'x': ('x', [0, 1],\n expected_attrs)})\n", (15695, 15775), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15971, 15992), 'xarray.Dataset', 'Dataset', (['{}', "{'x': 0}"], {}), "({}, {'x': 0})\n", (15978, 15992), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15994, 16013), 'xarray.Dataset', 'Dataset', (["{'x': [1]}"], {}), "({'x': [1]})\n", (16001, 16013), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16152, 16171), 'xarray.Dataset', 'Dataset', (["{'x': [0]}"], {}), "({'x': [0]})\n", 
(16159, 16171), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16173, 16194), 'xarray.Dataset', 'Dataset', (['{}', "{'x': 1}"], {}), "({}, {'x': 1})\n", (16180, 16194), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16330, 16357), 'xarray.Dataset', 'Dataset', (["{'x': [2], 'y': 3}"], {}), "({'x': [2], 'y': 3})\n", (16337, 16357), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16359, 16386), 'xarray.Dataset', 'Dataset', (["{'x': [4], 'y': 5}"], {}), "({'x': [4], 'y': 5})\n", (16366, 16386), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16584, 16614), 'xarray.Dataset', 'Dataset', (["{'x': [0]}", "{'y': -1}"], {}), "({'x': [0]}, {'y': -1})\n", (16591, 16614), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16616, 16655), 'xarray.Dataset', 'Dataset', (["{'x': [1]}", "{'y': ('x', [-2])}"], {}), "({'x': [1]}, {'y': ('x', [-2])})\n", (16623, 16655), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16896, 16926), 'xarray.Dataset', 'Dataset', (["{'x': [0]}", "{'y': -1}"], {}), "({'x': [0]}, {'y': -1})\n", (16903, 16926), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((16928, 16961), 'xarray.Dataset', 'Dataset', (["{'x': [1, 2]}", "{'y': -2}"], {}), "({'x': [1, 2]}, {'y': -2})\n", (16935, 16961), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17178, 17227), 'xarray.Dataset', 'Dataset', (["{'z': ('x', [-1])}", "{'x': [0], 'y': [0]}"], {}), "({'z': ('x', [-1])}, {'x': [0], 'y': [0]})\n", (17185, 17227), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17241, 17289), 'xarray.Dataset', 'Dataset', (["{'z': ('y', [1])}", "{'x': [1], 'y': [0]}"], {}), "({'z': ('y', [1])}, {'x': [1], 'y': [0]})\n", (17248, 17289), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17561, 17607), 'xarray.Dataset', 'Dataset', (["{'y': ('t', [1])}", "{'x': 1, 't': [0]}"], 
{}), "({'y': ('t', [1])}, {'x': 1, 't': [0]})\n", (17568, 17607), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17621, 17667), 'xarray.Dataset', 'Dataset', (["{'y': ('t', [2])}", "{'x': 1, 't': [0]}"], {}), "({'y': ('t', [2])}, {'x': 1, 't': [0]})\n", (17628, 17667), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17859, 17905), 'xarray.Dataset', 'Dataset', (["{'y': ('t', [1])}", "{'x': 1, 't': [0]}"], {}), "({'y': ('t', [1])}, {'x': 1, 't': [0]})\n", (17866, 17905), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17919, 17965), 'xarray.Dataset', 'Dataset', (["{'y': ('t', [2])}", "{'x': 2, 't': [0]}"], {}), "({'y': ('t', [2])}, {'x': 2, 't': [0]})\n", (17926, 17965), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((17990, 18015), 'pytest.raises', 'pytest.raises', (['ValueError'], {}), '(ValueError)\n', (18003, 18015), False, 'import pytest\n'), ((18029, 18064), 'xarray.concat', 'concat', (['objs', '"""t"""'], {'coords': '"""minimal"""'}), "(objs, 't', coords='minimal')\n", (18035, 18064), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18133, 18150), 'xarray.Dataset', 'Dataset', (["{'x': 0}"], {}), "({'x': 0})\n", (18140, 18150), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18152, 18169), 'xarray.Dataset', 'Dataset', (["{'x': 1}"], {}), "({'x': 1})\n", (18159, 18169), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18887, 18950), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [2, 3]), 'b': ('x', [-2, 1]), 'x': [1, 2]}"], {}), "({'a': ('x', [2, 3]), 'b': ('x', [-2, 1]), 'x': [1, 2]})\n", (18894, 18950), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((18964, 19027), 'xarray.Dataset', 'Dataset', (["{'a': ('x', [1, 2]), 'b': ('x', [3, -1]), 'x': [0, 1]}"], {}), "({'a': ('x', [1, 2]), 'b': ('x', [3, -1]), 'x': [0, 1]})\n", (18971, 19027), False, 'from xarray import DataArray, Dataset, 
Variable, concat\n'), ((20892, 20926), 'numpy.array', 'np.array', (['[foo.values, bar.values]'], {}), '([foo.values, bar.values])\n', (20900, 20926), True, 'import numpy as np\n'), ((21325, 21352), 'pandas.Index', 'pd.Index', (["ds['x']"], {'name': '"""x"""'}), "(ds['x'], name='x')\n", (21333, 21352), True, 'import pandas as pd\n'), ((21767, 21815), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""not identical"""'}), "(ValueError, match='not identical')\n", (21780, 21815), False, 'import pytest\n'), ((21830, 21877), 'xarray.concat', 'concat', (['[foo, bar]'], {'dim': '"""w"""', 'compat': '"""identical"""'}), "([foo, bar], dim='w', compat='identical')\n", (21836, 21877), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((21892, 21947), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""not a valid argument"""'}), "(ValueError, match='not a valid argument')\n", (21905, 21947), False, 'import pytest\n'), ((21962, 22010), 'xarray.concat', 'concat', (['[foo, bar]'], {'dim': '"""w"""', 'data_vars': '"""minimal"""'}), "([foo, bar], dim='w', data_vars='minimal')\n", (21968, 22010), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((24716, 24778), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""indexes along dimension \'y\'"""'}), '(ValueError, match="indexes along dimension \'y\'")\n', (24729, 24778), False, 'import pytest\n'), ((24802, 24843), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'join': '"""exact"""', 'dim': '"""x"""'}), "([ds1, ds2], join='exact', dim='x')\n", (24808, 24843), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((24896, 24934), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'join': 'join', 'dim': '"""x"""'}), "([ds1, ds2], join=join, dim='x')\n", (24902, 24934), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25537, 25597), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""combine_attrs=\'identical\'"""'}), 
'(ValueError, match="combine_attrs=\'identical\'")\n', (25550, 25597), False, 'import pytest\n'), ((25621, 25675), 'xarray.concat', 'concat', (['[da1, da2]'], {'dim': '"""x"""', 'combine_attrs': '"""identical"""'}), "([da1, da2], dim='x', combine_attrs='identical')\n", (25627, 25675), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25689, 25752), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""combine_attrs=\'no_conflicts\'"""'}), '(ValueError, match="combine_attrs=\'no_conflicts\'")\n', (25702, 25752), False, 'import pytest\n'), ((25846, 25903), 'xarray.concat', 'concat', (['[da1, da3]'], {'dim': '"""x"""', 'combine_attrs': '"""no_conflicts"""'}), "([da1, da3], dim='x', combine_attrs='no_conflicts')\n", (25852, 25903), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((25965, 26021), 'xarray.concat', 'concat', (['[da1, da2]'], {'dim': '"""x"""', 'combine_attrs': 'combine_attrs'}), "([da1, da2], dim='x', combine_attrs=combine_attrs)\n", (25971, 26021), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((28336, 28402), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': '"""\'y\' not present in all datasets"""'}), '(ValueError, match="\'y\' not present in all datasets")\n', (28349, 28402), False, 'import pytest\n'), ((28417, 28449), 'xarray.concat', 'concat', (['[da1, da2, da3]'], {'dim': '"""x"""'}), "([da1, da2, da3], dim='x')\n", (28423, 28449), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((29485, 29516), 'xarray.Dataset', 'Dataset', (["{'foo': 3}", "{'bar': 4}"], {}), "({'foo': 3}, {'bar': 4})\n", (29492, 29516), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((6141, 6200), 'pytest.raises', 'pytest.raises', (['merge.MergeError'], {'match': '"""conflicting values"""'}), "(merge.MergeError, match='conflicting values')\n", (6154, 6200), False, 'import pytest\n'), ((6218, 6254), 'xarray.concat', 'concat', (['objs'], {'dim': 
'"""x"""', 'coords': 'coords'}), "(objs, dim='x', coords=coords)\n", (6224, 6254), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((7340, 7381), 'xarray.DataArray', 'DataArray', (['[1, 2]'], {'coords': "[('x', [1, 2])]"}), "([1, 2], coords=[('x', [1, 2])])\n", (7349, 7381), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((7414, 7455), 'xarray.DataArray', 'DataArray', (['[1, 2]'], {'coords': "[('x', [1, 3])]"}), "([1, 2], coords=[('x', [1, 3])])\n", (7423, 7455), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((7564, 7654), 'xarray.DataArray', 'DataArray', (['[[1, 2, np.nan], [1, np.nan, 2]]'], {'dims': "['y', 'x']", 'coords': "{'x': [1, 2, 3]}"}), "([[1, 2, np.nan], [1, np.nan, 2]], dims=['y', 'x'], coords={'x': [\n 1, 2, 3]})\n", (7573, 7654), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((13004, 13071), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""combine_attrs=\'{combine_attrs}\'"""'}), '(ValueError, match=f"combine_attrs=\'{combine_attrs}\'")\n', (13017, 13071), False, 'import pytest\n'), ((13089, 13145), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""x"""', 'combine_attrs': 'combine_attrs'}), "([ds1, ds2], dim='x', combine_attrs=combine_attrs)\n", (13095, 13145), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((15431, 15498), 'pytest.raises', 'pytest.raises', (['ValueError'], {'match': 'f"""combine_attrs=\'{combine_attrs}\'"""'}), '(ValueError, match=f"combine_attrs=\'{combine_attrs}\'")\n', (15444, 15498), False, 'import pytest\n'), ((15516, 15572), 'xarray.concat', 'concat', (['[ds1, ds2]'], {'dim': '"""x"""', 'combine_attrs': 'combine_attrs'}), "([ds1, ds2], dim='x', combine_attrs=combine_attrs)\n", (15522, 15572), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((19966, 19978), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (19975, 19978), True, 'import numpy as np\n'), ((20131, 20164), 
'numpy.array', 'np.array', (["['a', 'b']"], {'dtype': 'dtype'}), "(['a', 'b'], dtype=dtype)\n", (20139, 20164), True, 'import numpy as np\n'), ((20295, 20311), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (20303, 20311), True, 'import numpy as np\n'), ((20335, 20368), 'numpy.array', 'np.array', (["['c', 'd']"], {'dtype': 'dtype'}), "(['c', 'd'], dtype=dtype)\n", (20343, 20368), True, 'import numpy as np\n'), ((21597, 21629), 'xarray.concat', 'concat', (['[foo[0], foo[1]]', '[0, 1]'], {}), '([foo[0], foo[1]], [0, 1])\n', (21603, 21629), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((22432, 22459), 'xarray.concat', 'concat', (['[foo, foo]'], {'dim': '"""x"""'}), "([foo, foo], dim='x')\n", (22438, 22459), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((22500, 22525), 'xarray.concat', 'concat', (['[ds, ds]'], {'dim': '"""x"""'}), "([ds, ds], dim='x')\n", (22506, 22525), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23722, 23786), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[0]])}"], {'coords': "{'x': [0], 'y': [0]}"}), "({'a': (('x', 'y'), [[0]])}, coords={'x': [0], 'y': [0]})\n", (23729, 23786), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((23834, 23903), 'xarray.Dataset', 'Dataset', (["{'a': (('x', 'y'), [[0]])}"], {'coords': "{'x': [1], 'y': [0.0001]}"}), "({'a': (('x', 'y'), [[0]])}, coords={'x': [1], 'y': [0.0001]})\n", (23841, 23903), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((26260, 26272), 'numpy.arange', 'np.arange', (['(4)'], {}), '(4)\n', (26269, 26272), True, 'import numpy as np\n'), ((26856, 26883), 'xarray.DataArray', 'DataArray', (['[0, 1]'], {'dims': '"""b"""'}), "([0, 1], dims='b')\n", (26865, 26883), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((26978, 26995), 'xarray.DataArray', 'DataArray', (['[0, 1]'], {}), '([0, 1])\n', (26987, 26995), False, 'from xarray import DataArray, 
Dataset, Variable, concat\n'), ((4977, 4996), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (4992, 4996), True, 'import numpy as np\n'), ((5300, 5319), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (5315, 5319), True, 'import numpy as np\n'), ((5699, 5718), 'numpy.random.randn', 'np.random.randn', (['(10)'], {}), '(10)\n', (5714, 5718), True, 'import numpy as np\n'), ((9244, 9261), 'xarray.Dataset', 'Dataset', (["{'x': 0}"], {}), "({'x': 0})\n", (9251, 9261), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9263, 9282), 'xarray.Dataset', 'Dataset', (["{'x': [1]}"], {}), "({'x': [1]})\n", (9270, 9282), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9430, 9447), 'xarray.Dataset', 'Dataset', (["{'x': 0}"], {}), "({'x': 0})\n", (9437, 9447), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((9449, 9470), 'xarray.Dataset', 'Dataset', (['{}', "{'x': 1}"], {}), "({}, {'x': 1})\n", (9456, 9470), False, 'from xarray import DataArray, Dataset, Variable, concat\n'), ((10950, 10975), 'numpy.array', 'np.array', (['[0, 0]'], {'ndmin': '(2)'}), '([0, 0], ndmin=2)\n', (10958, 10975), True, 'import numpy as np\n'), ((20629, 20653), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (20645, 20653), True, 'import numpy as np\n'), ((20692, 20716), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (20708, 20716), True, 'import numpy as np\n'), ((21437, 21453), 'pandas.Index', 'pd.Index', (['[0, 1]'], {}), '([0, 1])\n', (21445, 21453), True, 'import pandas as pd\n'), ((22165, 22189), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (22181, 22189), True, 'import numpy as np\n'), ((22228, 22252), 'numpy.random.random', 'np.random.random', (['(2, 3)'], {}), '((2, 3))\n', (22244, 22252), True, 'import numpy as np\n'), ((26409, 26442), 'numpy.array', 'np.array', (["['a', 'b']"], {'dtype': 'dtype'}), "(['a', 
'b'], dtype=dtype)\n", (26417, 26442), True, 'import numpy as np\n'), ((26560, 26576), 'numpy.array', 'np.array', (['[1, 2]'], {}), '([1, 2])\n', (26568, 26576), True, 'import numpy as np\n'), ((26584, 26617), 'numpy.array', 'np.array', (["['c', 'd']"], {'dtype': 'dtype'}), "(['c', 'd'], dtype=dtype)\n", (26592, 26617), True, 'import numpy as np\n'), ((10048, 10078), 'numpy.array', 'np.array', (['[0, np.nan]'], {'ndmin': '(2)'}), '([0, np.nan], ndmin=2)\n', (10056, 10078), True, 'import numpy as np\n'), ((10206, 10236), 'numpy.array', 'np.array', (['[np.nan, 0]'], {'ndmin': '(2)'}), '([np.nan, 0], ndmin=2)\n', (10214, 10236), True, 'import numpy as np\n'), ((10372, 10397), 'numpy.array', 'np.array', (['[0, 0]'], {'ndmin': '(2)'}), '([0, 0], ndmin=2)\n', (10380, 10397), True, 'import numpy as np\n'), ((22733, 22749), 'numpy.zeros', 'np.zeros', (['(3, 3)'], {}), '((3, 3))\n', (22741, 22749), True, 'import numpy as np\n'), ((24293, 24323), 'numpy.array', 'np.array', (['[0, np.nan]'], {'ndmin': '(2)'}), '([0, np.nan], ndmin=2)\n', (24301, 24323), True, 'import numpy as np\n'), ((24451, 24481), 'numpy.array', 'np.array', (['[np.nan, 0]'], {'ndmin': '(2)'}), '([np.nan, 0], ndmin=2)\n', (24459, 24481), True, 'import numpy as np\n'), ((24617, 24642), 'numpy.array', 'np.array', (['[0, 0]'], {'ndmin': '(2)'}), '([0, 0], ndmin=2)\n', (24625, 24642), True, 'import numpy as np\n')] |
"""Script to compare the sensitivity and discovery potential for the LLAGN sample (15887 sources)
as a function of injected spectral index for energy decades between 100 GeV and 10 PeV.
"""
from __future__ import print_function
from __future__ import division
import numpy as np
from flarestack.core.results import ResultsHandler
# # from flarestack.data.icecube.ps_tracks.ps_v002_p01 import ps_7year
# from flarestack.data.icecube.ps_tracks.ps_v003_p02 import ps_10year
# from flarestack.data.icecube.northern_tracks.nt_v002_p05 import diffuse_8year
# from flarestack.data.icecube.gfu.gfu_v002_p01 import txs_sample_v1
from flarestack.shared import plot_output_dir, flux_to_k, make_analysis_pickle, k_to_flux
from flarestack.data.icecube import diffuse_8_year
from flarestack.utils.catalogue_loader import load_catalogue
from flarestack.analyses.agn_cores.shared_agncores import \
agn_subset_catalogue, complete_cats_north, complete_cats_north, agn_catalogue_name, agn_subset_catalogue_north
from flarestack.core.minimisation import MinimisationHandler
from flarestack.cluster import analyse, wait_for_cluster
import math
import matplotlib.pyplot as plt
from matplotlib.ticker import ScalarFormatter
# plt.style.use('~/scratch/phdthesis.mpltstyle')
import time
import logging
import os
import psutil, resource #to get memory usage info
analyses = {}

# Shared likelihood / injector configuration: time-independent ("Steady")
# emission with a power-law energy PDF, evaluated with the matrix-based
# standard likelihood.
llh_time = {"time_pdf_name": "Steady"}
llh_energy = {"energy_pdf_name": "PowerLaw"}

llh_dict = {
    "llh_name": "standard_matrix",
    "llh_sig_time_pdf": llh_time,
    "llh_energy_pdf": llh_energy,
}
def base_name(cat_key, gamma):
    """Return the analysis output sub-directory for one catalogue key and
    injected spectral index, with a trailing slash."""
    root = "analyses/agn_cores/stacking_analysis_8yrNTsample_diff_sens_pre_unblinding"
    return "{0}/{1}/{2}/".format(root, cat_key, gamma)
def generate_name(cat_key, n_sources, gamma):
    """Return the output sub-directory for one (catalogue, gamma, source-count)
    combination, nested under base_name()."""
    return "{0}NrSrcs={1}/".format(base_name(cat_key, gamma), n_sources)
# Injected spectral indices to scan over.
gammas = [2.0, 2.5]

# Number of sources in the LLAGN sample.
nr_brightest_sources = [15887]

# Decade-wide energy bins between 100 GeV and 10 PeV: six log-spaced edges
# paired into five (lower, upper) tuples.
energies = np.logspace(2, 7, 6)
bins = list(zip(energies, energies[1:]))

all_res = {}
# Build one MinimisationHandler configuration per (catalogue selection, gamma,
# energy decade) and run the trials locally. The configurations are collected
# in: all_res[cat_key][gamma][n_sources][e_min] -> mh_dict, for the
# results-processing stage below.
for (cat_type, method) in complete_cats_north[-1:]:
    unique_key = cat_type + "_" + method
    print(unique_key)
    gamma_dict = dict()
    for gamma_index in gammas:
        res = dict()
        for j, nr_srcs in enumerate(nr_brightest_sources):
            # Path to the subset catalogue containing the nr_srcs brightest
            # sources of this (type, method) selection.
            cat_path = agn_subset_catalogue(cat_type, method, nr_srcs)
            print("Loading catalogue", cat_path, " with ", nr_srcs, "sources")
            # NOTE(review): `catalogue` is never used afterwards; the raw
            # array is re-loaded immediately below via np.load.
            catalogue = load_catalogue(cat_path)
            cat = np.load(cat_path)
            print("Total flux is: ", cat['base_weight'].sum()*1e-13)
            full_name = generate_name(unique_key, nr_srcs, gamma_index)
            res_e_min = dict()
            # scale factor of neutrino injection, tuned for each energy bin
            scale_factor_per_decade = [0.2, 0.5, 1, 0.57, 0.29]
            for i, (e_min, e_max) in enumerate(bins[:]):
                full_name_en = full_name + 'Emin={0:.2f}'.format(e_min) + "/"
                print("Full name for ", nr_srcs, " sources is", full_name_en)
                # Injection parameters: steady emission, power law restricted
                # to the current energy decade [e_min, e_max].
                injection_time = llh_time
                injection_energy = dict(llh_energy)
                injection_energy["gamma"] = gamma_index
                injection_energy["e_min_gev"] = e_min
                injection_energy["e_max_gev"] = e_max
                inj_kwargs = {
                    "injection_energy_pdf": injection_energy,
                    "injection_sig_time_pdf": injection_time,
                }
                mh_dict = {
                    "name": full_name_en,
                    "mh_name": "large_catalogue",
                    "dataset": diffuse_8_year.get_seasons(), #subselection_fraction=1),
                    "catalogue": cat_path,
                    "llh_dict": llh_dict,
                    "inj_dict": inj_kwargs,
                    "n_trials": 1, #10,
                    # "n_steps": 15,
                }
                mh = MinimisationHandler.create(mh_dict)
                # scale factor to tune (manually) the number of injected neutrinos
                scale_factor = 3 * mh.guess_scale()/3/7/scale_factor_per_decade[i]
                print("Scale Factor: ", scale_factor_per_decade[i], scale_factor)
                # # # # # How to run on the cluster for sources < 3162
                mh_dict["n_steps"] = 15
                mh_dict["scale"] = scale_factor
                analyse(mh_dict, cluster=False, n_cpu=32, n_jobs=150)
                # How to run on the cluster for sources > 3162
                # _n_jobs = 50
                # scale_loop = np.linspace(0, scale_factor, 15)
                # print(scale_loop)
                # for scale in scale_loop[:4]:
                #     print('Running ' + str(mh_dict["n_trials"]) + ' trials with scale ' + str(scale))
                #     mh_dict["fixed_scale"] = scale
                #     # # analyse(mh_dict, cluster=False, n_cpu=35, n_jobs=10)
                #     if scale == 0.:
                #         n_jobs = _n_jobs*10
                #     else:
                #         n_jobs = _n_jobs
                #     print("Submitting " + str(n_jobs) + " jobs")
                #     analyse(mh_dict, cluster=True, n_cpu=1, n_jobs=n_jobs)
                # Keep the configured mh_dict so the results stage can locate
                # the pickles written by analyse().
                res_e_min[e_min] = mh_dict
            res[nr_srcs] = res_e_min
        gamma_dict[gamma_index] = res
    all_res[unique_key] = gamma_dict
# wait_for_cluster()
# Raise the root logger to INFO for the results-processing stage.
logging.getLogger().setLevel(logging.INFO)
# Post-processing: for every catalogue key and spectral index, load the trial
# results, compute sensitivities / discovery potentials per energy decade, and
# write one "data.out" table per (catalogue, gamma) into the plot directory.
for (cat_key, gamma_dict) in all_res.items():
    print(cat_key, cat_key.split("_"))
    # cat_key has the form "<agn_type>_<xray_catalogue>".
    agn_type = cat_key.split("_")[0]
    xray_cat = cat_key.split(str(agn_type)+'_')[-1]
    # Full (un-subsetted) catalogue, used to normalise X-ray flux fractions.
    full_cat = load_catalogue(agn_catalogue_name(agn_type, xray_cat))
    full_flux = np.sum(full_cat["base_weight"])
    # Fraction of the diffuse flux at which the stacking limit saturates;
    # ratios divided by this value below give "saturated" ratios.
    saturate_ratio = 0.26
    # Loop on gamma
    for (gamma_index, gamma_res) in (iter(gamma_dict.items())):
        # Per-energy-bin accumulators, appended to once per successful
        # ResultsHandler load. NOTE(review): `guess` and `fracs` are filled
        # (or left empty) but never written to the output file.
        sens = []
        sens_err_low = []
        sens_err_upp = []
        disc_pot = []
        disc_ts_threshold = []
        n_src = []
        fracs = []
        sens_livetime = []
        disc_pots_livetime = []
        sens_livetime_100GeV10PeV = []
        disc_pots_livetime_100GeV10PeV = []
        ratio_sens = []
        ratio_disc = []
        ratio_sens_100GeV10PeV = []
        ratio_disc_100GeV10PeV = []
        int_xray_flux_erg = []
        int_xray_flux = []
        guess = []
        sens_n = []
        disc_pot_n = []
        e_min_gev = []
        e_max_gev = []
        base_dir = base_name(cat_key, gamma_index)
        # Loop on number of sources of the AGN sample
        for (nr_srcs, rh_dict_srcs) in sorted(gamma_res.items()):
            print("In if loop on nr_srcs and rh_dict")
            print(nr_srcs)
            print(rh_dict_srcs)
            print("nr_srcs in loop: ", nr_srcs)
            # loop on emin and emax
            for (e_min, rh_dict) in sorted(rh_dict_srcs.items()):
                cat = load_catalogue(rh_dict["catalogue"])
                print("e_min in loop: ", e_min)
                print(" ")
                print(" ")
                # Integrated X-ray flux; 624.151 converts erg to GeV
                # (1 erg = 624.151 GeV), base_weight carries a 1e-13 scaling.
                int_xray = np.sum(cat["base_weight"] / 1e13*624.151)
                int_xray_flux.append(int_xray) # GeV cm-2 s-1
                int_xray_flux_erg.append(np.sum(cat["base_weight"]) / 1e13) # erg
                # cm-2 s-1
                fracs.append(np.sum(cat["base_weight"])/full_flux)
                try:
                    rh = ResultsHandler(rh_dict)
                    print("Sens", rh.sensitivity)
                    print("Sens_err", rh.sensitivity_err, rh.sensitivity_err[0], rh.sensitivity_err[1])
                    print("Disc", rh.disc_potential)
                    print("Disc_TS_threshold", rh.disc_ts_threshold)
                    # print("Guess", rh_dict["scale"])
                    print("Sens (n)", rh.sensitivity * rh.flux_to_ns)
                    print("DP (n)", rh.disc_potential * rh.flux_to_ns)
                    # # guess.append(k_to_flux(rh_dict["scale"])* 2./3.)
                    # guess.append(k_to_flux(rh_dict["scale"])/3.)
                    print(rh_dict["inj_dict"], rh_dict["inj_dict"]["injection_energy_pdf"]["e_min_gev"])
                    e_min_gev.append(rh_dict["inj_dict"]["injection_energy_pdf"]["e_min_gev"])
                    e_max_gev.append(rh_dict["inj_dict"]["injection_energy_pdf"]["e_max_gev"])
                    # sensitivity/dp normalized per flux normalization GeV-1 cm-2 s-1
                    sens.append(rh.sensitivity)
                    sens_err_low.append(rh.sensitivity_err[0])
                    sens_err_upp.append(rh.sensitivity_err[1])
                    disc_pot.append(rh.disc_potential)
                    disc_ts_threshold.append(rh.disc_ts_threshold)
                    sens_n.append(rh.sensitivity * rh.flux_to_ns)
                    disc_pot_n.append(rh.disc_potential * rh.flux_to_ns)
                    key = "Energy Flux (GeV cm^{-2} s^{-1})" # old version: "Total Fluence (GeV cm^{-2} s^{-1})"
                    # Energy flux integrated over the injected energy range.
                    astro_sens, astro_disc = rh.astro_values(
                        rh_dict["inj_dict"]["injection_energy_pdf"])
                    sens_livetime.append(astro_sens[key]) # fluence=integrated over energy
                    disc_pots_livetime.append(astro_disc[key])
                    # Nu energy flux integrated between 100GeV and 10PeV,
                    # independently from the e_min_gev, e_max_gev of the injection.
                    # NOTE(review): this mutates the shared injection PDF dict
                    # in place after the first astro_values call above.
                    rh_dict["inj_dict"]["injection_energy_pdf"]["e_min_gev"] = 100
                    rh_dict["inj_dict"]["injection_energy_pdf"]["e_max_gev"] = 1e7
                    astro_sens_100GeV10PeV, astro_disc_100GeV10PeV = rh.astro_values(
                        rh_dict["inj_dict"]["injection_energy_pdf"])
                    sens_livetime_100GeV10PeV.append(astro_sens_100GeV10PeV[key]) # fluence=integrated over energy
                    disc_pots_livetime_100GeV10PeV.append(astro_disc_100GeV10PeV[key])
                    # normalized over tot xray flux
                    ratio_sens.append(astro_sens[key] / int_xray) # fluence
                    ratio_disc.append(astro_disc[key] / int_xray)
                    ratio_sens_100GeV10PeV.append(astro_sens_100GeV10PeV[key] / int_xray) # fluence
                    ratio_disc_100GeV10PeV.append(astro_disc_100GeV10PeV[key] / int_xray)
                    n_src.append(nr_srcs)
                except OSError:
                    # NOTE(review): missing/unfinished result pickles are
                    # silently skipped; the accumulators then hold fewer
                    # entries than there are energy bins.
                    pass
        # # Save arrays to file
        # NOTE(review): the header string is missing a separator after
        # "e_max_gev" -- the adjacent literals concatenate to
        # "...e_max_gevsensitivity...", so column labels are misaligned
        # with the 23 saved arrays.
        np.savetxt(plot_output_dir(base_dir) + "data.out",
                   (np.array(n_src), np.array(int_xray_flux_erg),
                    np.array(e_min_gev), np.array(e_max_gev),
                    np.array(sens), np.array(sens_err_low), np.array(sens_err_upp),
                    np.array(disc_pot), np.array(disc_ts_threshold),
                    np.array(sens_livetime), np.array(disc_pots_livetime),
                    np.array(ratio_sens), np.array(ratio_disc),
                    np.array(ratio_sens)/saturate_ratio, np.array(ratio_disc)/saturate_ratio,
                    np.array(sens_livetime_100GeV10PeV), np.array(disc_pots_livetime_100GeV10PeV),
                    np.array(ratio_sens_100GeV10PeV), np.array(ratio_disc_100GeV10PeV),
                    np.array(ratio_sens_100GeV10PeV)/saturate_ratio, np.array(ratio_disc_100GeV10PeV)/saturate_ratio,
                    np.array(sens_n), np.array(disc_pot_n)),
                   header="n_src, int_xray_flux_erg, "
                          "e_min_gev, e_max_gev"
                          "sensitivity, sensitivity_err_lower, sensitivity_err_upper,"
                          "dp, disc_ts_threshold,"
                          "int_sensitivity, int_dp, ratio_sens, ratio_dp,"
                          "ratio_sens_saturate, ratio_dp_saturate,"
                          "int_sensitivity_100GeV10PeV, int_dp_100GeV10PeV, ratio_sens_100GeV10PeV, ratio_dp_100GeV10PeV,"
                          "ratio_sens_saturate_100GeV10PeV, ratio_dp_saturate_100GeV10PeV,"
                          "sensitivity_nr_neutrinos, dp_nr_neutrinos")
| [
"logging.getLogger",
"flarestack.cluster.analyse",
"flarestack.data.icecube.diffuse_8_year.get_seasons",
"flarestack.utils.catalogue_loader.load_catalogue",
"flarestack.analyses.agn_cores.shared_agncores.agn_subset_catalogue",
"flarestack.shared.plot_output_dir",
"flarestack.core.results.ResultsHandler"... | [((2018, 2038), 'numpy.logspace', 'np.logspace', (['(2)', '(7)', '(6)'], {}), '(2, 7, 6)\n', (2029, 2038), True, 'import numpy as np\n'), ((5824, 5855), 'numpy.sum', 'np.sum', (["full_cat['base_weight']"], {}), "(full_cat['base_weight'])\n", (5830, 5855), True, 'import numpy as np\n'), ((5522, 5541), 'logging.getLogger', 'logging.getLogger', ([], {}), '()\n', (5539, 5541), False, 'import logging\n'), ((5767, 5805), 'flarestack.analyses.agn_cores.shared_agncores.agn_catalogue_name', 'agn_catalogue_name', (['agn_type', 'xray_cat'], {}), '(agn_type, xray_cat)\n', (5785, 5805), False, 'from flarestack.analyses.agn_cores.shared_agncores import agn_subset_catalogue, complete_cats_north, complete_cats_north, agn_catalogue_name, agn_subset_catalogue_north\n'), ((2382, 2429), 'flarestack.analyses.agn_cores.shared_agncores.agn_subset_catalogue', 'agn_subset_catalogue', (['cat_type', 'method', 'nr_srcs'], {}), '(cat_type, method, nr_srcs)\n', (2402, 2429), False, 'from flarestack.analyses.agn_cores.shared_agncores import agn_subset_catalogue, complete_cats_north, complete_cats_north, agn_catalogue_name, agn_subset_catalogue_north\n'), ((2533, 2557), 'flarestack.utils.catalogue_loader.load_catalogue', 'load_catalogue', (['cat_path'], {}), '(cat_path)\n', (2547, 2557), False, 'from flarestack.utils.catalogue_loader import load_catalogue\n'), ((2576, 2593), 'numpy.load', 'np.load', (['cat_path'], {}), '(cat_path)\n', (2583, 2593), True, 'import numpy as np\n'), ((4051, 4086), 'flarestack.core.minimisation.MinimisationHandler.create', 'MinimisationHandler.create', (['mh_dict'], {}), '(mh_dict)\n', (4077, 4086), False, 'from flarestack.core.minimisation import MinimisationHandler\n'), ((4511, 4564), 'flarestack.cluster.analyse', 'analyse', (['mh_dict'], {'cluster': '(False)', 'n_cpu': '(32)', 'n_jobs': '(150)'}), '(mh_dict, cluster=False, n_cpu=32, n_jobs=150)\n', (4518, 4564), False, 'from flarestack.cluster import analyse, 
wait_for_cluster\n'), ((7022, 7058), 'flarestack.utils.catalogue_loader.load_catalogue', 'load_catalogue', (["rh_dict['catalogue']"], {}), "(rh_dict['catalogue'])\n", (7036, 7058), False, 'from flarestack.utils.catalogue_loader import load_catalogue\n'), ((7193, 7248), 'numpy.sum', 'np.sum', (["(cat['base_weight'] / 10000000000000.0 * 624.151)"], {}), "(cat['base_weight'] / 10000000000000.0 * 624.151)\n", (7199, 7248), True, 'import numpy as np\n'), ((10598, 10623), 'flarestack.shared.plot_output_dir', 'plot_output_dir', (['base_dir'], {}), '(base_dir)\n', (10613, 10623), False, 'from flarestack.shared import plot_output_dir, flux_to_k, make_analysis_pickle, k_to_flux\n'), ((10658, 10673), 'numpy.array', 'np.array', (['n_src'], {}), '(n_src)\n', (10666, 10673), True, 'import numpy as np\n'), ((10675, 10702), 'numpy.array', 'np.array', (['int_xray_flux_erg'], {}), '(int_xray_flux_erg)\n', (10683, 10702), True, 'import numpy as np\n'), ((10724, 10743), 'numpy.array', 'np.array', (['e_min_gev'], {}), '(e_min_gev)\n', (10732, 10743), True, 'import numpy as np\n'), ((10745, 10764), 'numpy.array', 'np.array', (['e_max_gev'], {}), '(e_max_gev)\n', (10753, 10764), True, 'import numpy as np\n'), ((10786, 10800), 'numpy.array', 'np.array', (['sens'], {}), '(sens)\n', (10794, 10800), True, 'import numpy as np\n'), ((10802, 10824), 'numpy.array', 'np.array', (['sens_err_low'], {}), '(sens_err_low)\n', (10810, 10824), True, 'import numpy as np\n'), ((10826, 10848), 'numpy.array', 'np.array', (['sens_err_upp'], {}), '(sens_err_upp)\n', (10834, 10848), True, 'import numpy as np\n'), ((10870, 10888), 'numpy.array', 'np.array', (['disc_pot'], {}), '(disc_pot)\n', (10878, 10888), True, 'import numpy as np\n'), ((10890, 10917), 'numpy.array', 'np.array', (['disc_ts_threshold'], {}), '(disc_ts_threshold)\n', (10898, 10917), True, 'import numpy as np\n'), ((10939, 10962), 'numpy.array', 'np.array', (['sens_livetime'], {}), '(sens_livetime)\n', (10947, 10962), True, 'import numpy as 
np\n'), ((10964, 10992), 'numpy.array', 'np.array', (['disc_pots_livetime'], {}), '(disc_pots_livetime)\n', (10972, 10992), True, 'import numpy as np\n'), ((11014, 11034), 'numpy.array', 'np.array', (['ratio_sens'], {}), '(ratio_sens)\n', (11022, 11034), True, 'import numpy as np\n'), ((11036, 11056), 'numpy.array', 'np.array', (['ratio_disc'], {}), '(ratio_disc)\n', (11044, 11056), True, 'import numpy as np\n'), ((11172, 11207), 'numpy.array', 'np.array', (['sens_livetime_100GeV10PeV'], {}), '(sens_livetime_100GeV10PeV)\n', (11180, 11207), True, 'import numpy as np\n'), ((11209, 11249), 'numpy.array', 'np.array', (['disc_pots_livetime_100GeV10PeV'], {}), '(disc_pots_livetime_100GeV10PeV)\n', (11217, 11249), True, 'import numpy as np\n'), ((11271, 11303), 'numpy.array', 'np.array', (['ratio_sens_100GeV10PeV'], {}), '(ratio_sens_100GeV10PeV)\n', (11279, 11303), True, 'import numpy as np\n'), ((11305, 11337), 'numpy.array', 'np.array', (['ratio_disc_100GeV10PeV'], {}), '(ratio_disc_100GeV10PeV)\n', (11313, 11337), True, 'import numpy as np\n'), ((11477, 11493), 'numpy.array', 'np.array', (['sens_n'], {}), '(sens_n)\n', (11485, 11493), True, 'import numpy as np\n'), ((11495, 11515), 'numpy.array', 'np.array', (['disc_pot_n'], {}), '(disc_pot_n)\n', (11503, 11515), True, 'import numpy as np\n'), ((3748, 3776), 'flarestack.data.icecube.diffuse_8_year.get_seasons', 'diffuse_8_year.get_seasons', ([], {}), '()\n', (3774, 3776), False, 'from flarestack.data.icecube import diffuse_8_year\n'), ((7520, 7543), 'flarestack.core.results.ResultsHandler', 'ResultsHandler', (['rh_dict'], {}), '(rh_dict)\n', (7534, 7543), False, 'from flarestack.core.results import ResultsHandler\n'), ((11078, 11098), 'numpy.array', 'np.array', (['ratio_sens'], {}), '(ratio_sens)\n', (11086, 11098), True, 'import numpy as np\n'), ((11115, 11135), 'numpy.array', 'np.array', (['ratio_disc'], {}), '(ratio_disc)\n', (11123, 11135), True, 'import numpy as np\n'), ((11359, 11391), 'numpy.array', 
'np.array', (['ratio_sens_100GeV10PeV'], {}), '(ratio_sens_100GeV10PeV)\n', (11367, 11391), True, 'import numpy as np\n'), ((11408, 11440), 'numpy.array', 'np.array', (['ratio_disc_100GeV10PeV'], {}), '(ratio_disc_100GeV10PeV)\n', (11416, 11440), True, 'import numpy as np\n'), ((7338, 7364), 'numpy.sum', 'np.sum', (["cat['base_weight']"], {}), "(cat['base_weight'])\n", (7344, 7364), True, 'import numpy as np\n'), ((7435, 7461), 'numpy.sum', 'np.sum', (["cat['base_weight']"], {}), "(cat['base_weight'])\n", (7441, 7461), True, 'import numpy as np\n')] |
"""
refenrence from: https://learnopencv.com/video-stabilization-using-point-feature-matching-in-opencv/
"""
import cv2
import numpy as np
from scipy.signal import savgol_filter
fname = "./deep-stabilization/dvs/video/s_114_outdoor_running_trail_daytime/ControlCam_20200930_104820.mp4"
# fname = "./deep-stabilization/dvs/test/stabilzation/s_114_outdoor_running_trail_daytime_stab.mp4"
cap = cv2.VideoCapture(fname)
# Get metadata
n_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
fps = 30
# Read first frame
_, prev = cap.read()
# Convert frame to grayscale
prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
# prev_gray = (prev_gray&192)|((prev_gray&32)<<1)
# Pre-define transformation-store array
transforms = np.zeros((n_frames-1, 3), np.float32)
for i in range(n_frames-2):
# Detect feature points in previous frame
prev_pts = cv2.goodFeaturesToTrack(prev_gray,
# maxCorners=1000,
# qualityLevel=0.2,
# minDistance=10,
# blockSize=5)
maxCorners=400,
qualityLevel=0.3,
minDistance=30,
blockSize=9)
# criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
# prev_pts = cv2.cornerSubPix( prev_gray, prev_pts, (5,5), (-1,1), criteria )
# Read next frame
success, curr = cap.read()
if not success:
break
# Convert to grayscale
curr_gray = cv2.cvtColor(curr, cv2.COLOR_BGR2GRAY)
# Calculate optical flow (i.e. track feature points)
curr_pts, status, err = cv2.calcOpticalFlowPyrLK(prev_gray, curr_gray, prev_pts, None)
# Filter only valid points
idx = np.where(status==1)[0]
prev_pts = prev_pts[idx]
curr_pts = curr_pts[idx]
#Find transformation matrix
retval, inliers = cv2.estimateAffine2D(prev_pts, curr_pts)
# retval = cv2.findHomography(prev_pts, curr_pts)[0]
# Extract traslation and rotation angle
dx = retval[0][2]
dy = retval[1][2]
da = np.arctan2(retval[1,0], retval[0,0])
# Store transformation
transforms[i] = [dx,dy,da]
# Move to next frame
prev_gray = curr_gray
print("Frame: {:03d}/{:3d} - Tracked points : {:3d}".format(i, n_frames, len(prev_pts)), end="\r", flush=True)
# Compute trajectory using cumulative sum of transformations
print("transforms: ", len(transforms))
trajectory = np.cumsum(transforms, axis=0)
def movingAverage(curve, window_size):
assert window_size%2, "window_size should be odd"
# Define the filter
f = np.ones(window_size)/window_size
# Add padding to the boundaries
curve_pad = np.lib.pad(curve, (window_size//2, window_size//2), 'edge')
# Apply convolution
return np.convolve(curve_pad, f, mode='valid')
return savgol_filter(curve, window_size, 3)
def movingAverage(curve, radius):
window_size = 2 * radius + 1
# Define the filter
f = np.ones(window_size)/window_size
# Add padding to the boundaries
curve_pad = np.lib.pad(curve, (radius, radius), 'edge')
# Apply convolution
curve_smoothed = np.convolve(curve_pad, f, mode='same')
# Remove padding
curve_smoothed = curve_smoothed[radius:-radius]
# return smoothed curve
return savgol_filter(curve, window_size, 3)
# return curve_smoothed
def fixBorder(frame):
s = frame.shape
# Scale the image 4% without moving the center
T = cv2.getRotationMatrix2D((s[1]/2, s[0]/2), 0, 1.04)
frame = cv2.warpAffine(frame, T, (s[1], s[0]))
return frame
def smooth(trajectory, SMOOTHING_RADIUS=30):
smoothed_trajectory = np.copy(trajectory)
for i in range(3):
smoothed_trajectory[:,i] = movingAverage(trajectory[:,i], SMOOTHING_RADIUS)
return smoothed_trajectory
# Calculate difference in smoothed_trajectory and trajectory
difference = smooth(trajectory) - trajectory
transforms_smooth = transforms + difference
# Reset stream to first frame
cap.set(cv2.CAP_PROP_POS_FRAMES, 0)
frames=[]
# Write n_frames-1 transformed frames
fourcc = cv2.VideoWriter_fourcc(*'mp4v')
out = cv2.VideoWriter('../video_out.mp4', fourcc, fps, (w, h))
for i in range(n_frames-2):
# Read next frame
success, frame = cap.read()
if not success:
break
# Extract transformations from the new transformation array
dx = transforms_smooth[i,0]
dy = transforms_smooth[i,1]
da = transforms_smooth[i,2]
# Reconstruct transformation matrix accordingly to new values
m = np.zeros((2,3), np.float32)
m[0,0] = np.cos(da)
m[0,1] = -np.sin(da)
m[1,0] = np.sin(da)
m[1,1] = np.cos(da)
m[0,2] = dx
m[1,2] = dy
# Apply affine wrapping to the given frame
frame_stabilized = cv2.warpAffine(frame.astype(np.float64)/255, m, (w,h))
# frame_stabilized = cv2.warpPerspective(frame.astype(np.float64)/255, m, (w,h))
# Fix border artifacts
# frame_stabilized = fixBorder(frame_stabilized)
# Write the frame to the file
frame_out = cv2.hconcat([frame.astype(np.float64)/255, frame_stabilized])
# If the image is too big, resize it.
if frame_out.shape[1] > 1920:
frame_out = cv2.resize(frame_out, (frame_out.shape[1]//2, frame_out.shape[0]));
frames.append(frame_out)
out.write((frame_out*255).astype(np.uint8))
out.release() | [
"numpy.convolve",
"scipy.signal.savgol_filter",
"numpy.lib.pad",
"numpy.arctan2",
"numpy.sin",
"numpy.where",
"cv2.estimateAffine2D",
"cv2.VideoWriter",
"cv2.VideoWriter_fourcc",
"cv2.warpAffine",
"numpy.ones",
"numpy.cos",
"cv2.cvtColor",
"cv2.getRotationMatrix2D",
"cv2.resize",
"nump... | [((393, 416), 'cv2.VideoCapture', 'cv2.VideoCapture', (['fname'], {}), '(fname)\n', (409, 416), False, 'import cv2\n'), ((664, 702), 'cv2.cvtColor', 'cv2.cvtColor', (['prev', 'cv2.COLOR_BGR2GRAY'], {}), '(prev, cv2.COLOR_BGR2GRAY)\n', (676, 702), False, 'import cv2\n'), ((807, 846), 'numpy.zeros', 'np.zeros', (['(n_frames - 1, 3)', 'np.float32'], {}), '((n_frames - 1, 3), np.float32)\n', (815, 846), True, 'import numpy as np\n'), ((2644, 2673), 'numpy.cumsum', 'np.cumsum', (['transforms'], {'axis': '(0)'}), '(transforms, axis=0)\n', (2653, 2673), True, 'import numpy as np\n'), ((4288, 4319), 'cv2.VideoWriter_fourcc', 'cv2.VideoWriter_fourcc', (["*'mp4v'"], {}), "(*'mp4v')\n", (4310, 4319), False, 'import cv2\n'), ((4326, 4382), 'cv2.VideoWriter', 'cv2.VideoWriter', (['"""../video_out.mp4"""', 'fourcc', 'fps', '(w, h)'], {}), "('../video_out.mp4', fourcc, fps, (w, h))\n", (4341, 4382), False, 'import cv2\n'), ((934, 1035), 'cv2.goodFeaturesToTrack', 'cv2.goodFeaturesToTrack', (['prev_gray'], {'maxCorners': '(400)', 'qualityLevel': '(0.3)', 'minDistance': '(30)', 'blockSize': '(9)'}), '(prev_gray, maxCorners=400, qualityLevel=0.3,\n minDistance=30, blockSize=9)\n', (957, 1035), False, 'import cv2\n'), ((1695, 1733), 'cv2.cvtColor', 'cv2.cvtColor', (['curr', 'cv2.COLOR_BGR2GRAY'], {}), '(curr, cv2.COLOR_BGR2GRAY)\n', (1707, 1733), False, 'import cv2\n'), ((1821, 1883), 'cv2.calcOpticalFlowPyrLK', 'cv2.calcOpticalFlowPyrLK', (['prev_gray', 'curr_gray', 'prev_pts', 'None'], {}), '(prev_gray, curr_gray, prev_pts, None)\n', (1845, 1883), False, 'import cv2\n'), ((2064, 2104), 'cv2.estimateAffine2D', 'cv2.estimateAffine2D', (['prev_pts', 'curr_pts'], {}), '(prev_pts, curr_pts)\n', (2084, 2104), False, 'import cv2\n'), ((2260, 2298), 'numpy.arctan2', 'np.arctan2', (['retval[1, 0]', 'retval[0, 0]'], {}), '(retval[1, 0], retval[0, 0])\n', (2270, 2298), True, 'import numpy as np\n'), ((2886, 2949), 'numpy.lib.pad', 'np.lib.pad', (['curve', '(window_size // 2, 
window_size // 2)', '"""edge"""'], {}), "(curve, (window_size // 2, window_size // 2), 'edge')\n", (2896, 2949), True, 'import numpy as np\n'), ((2981, 3020), 'numpy.convolve', 'np.convolve', (['curve_pad', 'f'], {'mode': '"""valid"""'}), "(curve_pad, f, mode='valid')\n", (2992, 3020), True, 'import numpy as np\n'), ((3032, 3068), 'scipy.signal.savgol_filter', 'savgol_filter', (['curve', 'window_size', '(3)'], {}), '(curve, window_size, 3)\n', (3045, 3068), False, 'from scipy.signal import savgol_filter\n'), ((3254, 3297), 'numpy.lib.pad', 'np.lib.pad', (['curve', '(radius, radius)', '"""edge"""'], {}), "(curve, (radius, radius), 'edge')\n", (3264, 3297), True, 'import numpy as np\n'), ((3343, 3381), 'numpy.convolve', 'np.convolve', (['curve_pad', 'f'], {'mode': '"""same"""'}), "(curve_pad, f, mode='same')\n", (3354, 3381), True, 'import numpy as np\n'), ((3494, 3530), 'scipy.signal.savgol_filter', 'savgol_filter', (['curve', 'window_size', '(3)'], {}), '(curve, window_size, 3)\n', (3507, 3530), False, 'from scipy.signal import savgol_filter\n'), ((3661, 3715), 'cv2.getRotationMatrix2D', 'cv2.getRotationMatrix2D', (['(s[1] / 2, s[0] / 2)', '(0)', '(1.04)'], {}), '((s[1] / 2, s[0] / 2), 0, 1.04)\n', (3684, 3715), False, 'import cv2\n'), ((3724, 3762), 'cv2.warpAffine', 'cv2.warpAffine', (['frame', 'T', '(s[1], s[0])'], {}), '(frame, T, (s[1], s[0]))\n', (3738, 3762), False, 'import cv2\n'), ((3852, 3871), 'numpy.copy', 'np.copy', (['trajectory'], {}), '(trajectory)\n', (3859, 3871), True, 'import numpy as np\n'), ((4736, 4764), 'numpy.zeros', 'np.zeros', (['(2, 3)', 'np.float32'], {}), '((2, 3), np.float32)\n', (4744, 4764), True, 'import numpy as np\n'), ((4777, 4787), 'numpy.cos', 'np.cos', (['da'], {}), '(da)\n', (4783, 4787), True, 'import numpy as np\n'), ((4826, 4836), 'numpy.sin', 'np.sin', (['da'], {}), '(da)\n', (4832, 4836), True, 'import numpy as np\n'), ((4850, 4860), 'numpy.cos', 'np.cos', (['da'], {}), '(da)\n', (4856, 4860), True, 'import numpy as 
np\n'), ((1927, 1948), 'numpy.where', 'np.where', (['(status == 1)'], {}), '(status == 1)\n', (1935, 1948), True, 'import numpy as np\n'), ((2801, 2821), 'numpy.ones', 'np.ones', (['window_size'], {}), '(window_size)\n', (2808, 2821), True, 'import numpy as np\n'), ((3169, 3189), 'numpy.ones', 'np.ones', (['window_size'], {}), '(window_size)\n', (3176, 3189), True, 'import numpy as np\n'), ((4802, 4812), 'numpy.sin', 'np.sin', (['da'], {}), '(da)\n', (4808, 4812), True, 'import numpy as np\n'), ((5396, 5464), 'cv2.resize', 'cv2.resize', (['frame_out', '(frame_out.shape[1] // 2, frame_out.shape[0])'], {}), '(frame_out, (frame_out.shape[1] // 2, frame_out.shape[0]))\n', (5406, 5464), False, 'import cv2\n')] |
import numpy as np
import gdal
def create_mask_from_vector(vector_data_path, cols, rows, geo_transform,
                            projection, target_value=1):
    """Burn a vector layer into a fresh in-memory raster (gdal.RasterizeLayer wrapper).

    Pixels covered by the vector's features are set to ``target_value``; the
    returned dataset carries a single UInt16 band with the supplied
    georeferencing and projection.
    """
    vector_ds = gdal.OpenEx(vector_data_path, gdal.OF_VECTOR)
    vector_layer = vector_ds.GetLayer(0)
    # Use the MEM driver so nothing is written to disk.
    mem_driver = gdal.GetDriverByName('MEM')
    raster_ds = mem_driver.Create('', cols, rows, 1, gdal.GDT_UInt16)
    raster_ds.SetGeoTransform(geo_transform)
    raster_ds.SetProjection(projection)
    gdal.RasterizeLayer(raster_ds, [1], vector_layer, burn_values=[target_value])
    return raster_ds
def vectors_to_raster(file_paths, rows, cols, geo_transform, projection):
    """Rasterize several vector files into a single labeled image.

    The i-th file (0-based) is burned with label i+1, so label 0 remains the
    background value.
    """
    labeled_pixels = np.zeros((rows, cols))
    for label, path in enumerate(file_paths, start=1):
        mask_ds = create_mask_from_vector(path, cols, rows, geo_transform,
                                          projection, target_value=label)
        labeled_pixels += mask_ds.GetRasterBand(1).ReadAsArray()
        mask_ds = None  # release the GDAL dataset handle
    return labeled_pixels
def write_geotiff(fname, data, geo_transform, projection, data_type=gdal.GDT_Byte):
    """Create a single-band GeoTIFF file with the given data.

    Generalized: ``data_type`` selects the band's pixel type (default
    ``gdal.GDT_Byte``, the original hard-coded behavior); pass e.g.
    ``gdal.GDT_Float32`` to write floating-point rasters.

    Args:
        fname: output file path.
        data: 2-D array written as band 1 (shape gives rows x cols).
        geo_transform: affine geotransform tuple for the dataset.
        projection: WKT projection string.
        data_type: GDAL band data type constant.
    """
    driver = gdal.GetDriverByName('GTiff')
    rows, cols = data.shape
    dataset = driver.Create(fname, cols, rows, 1, data_type)
    dataset.SetGeoTransform(geo_transform)
    dataset.SetProjection(projection)
    band = dataset.GetRasterBand(1)
    band.WriteArray(data)
    dataset = None  # Close the file (GDAL flushes on dataset destruction)
"numpy.zeros",
"gdal.OpenEx",
"gdal.RasterizeLayer",
"gdal.GetDriverByName"
] | [((254, 299), 'gdal.OpenEx', 'gdal.OpenEx', (['vector_data_path', 'gdal.OF_VECTOR'], {}), '(vector_data_path, gdal.OF_VECTOR)\n', (265, 299), False, 'import gdal\n'), ((349, 376), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""MEM"""'], {}), "('MEM')\n", (369, 376), False, 'import gdal\n'), ((553, 623), 'gdal.RasterizeLayer', 'gdal.RasterizeLayer', (['target_ds', '[1]', 'layer'], {'burn_values': '[target_value]'}), '(target_ds, [1], layer, burn_values=[target_value])\n', (572, 623), False, 'import gdal\n'), ((816, 838), 'numpy.zeros', 'np.zeros', (['(rows, cols)'], {}), '((rows, cols))\n', (824, 838), True, 'import numpy as np\n'), ((1291, 1320), 'gdal.GetDriverByName', 'gdal.GetDriverByName', (['"""GTiff"""'], {}), "('GTiff')\n", (1311, 1320), False, 'import gdal\n')] |
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for math_ops.bincount."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import test_util
from tensorflow.python.ops import math_ops
from tensorflow.python.platform import googletest
class BincountTest(test_util.TensorFlowTestCase):
  """Checks math_ops.bincount against hand-written and numpy references."""

  def test_empty(self):
    """An empty input yields `minlength` zeros of the requested dtype."""
    with self.test_session(use_gpu=True):
      for minlength, expected in [(5, [0, 0, 0, 0, 0]), (1, [0]), (0, [])]:
        self.assertAllEqual(
            math_ops.bincount([], minlength=minlength).eval(), expected)
      for minlength, dtype in [(0, np.float32), (3, np.float64)]:
        self.assertEqual(
            math_ops.bincount([], minlength=minlength, dtype=dtype).eval().dtype,
            dtype)

  def test_values(self):
    """Counts match the expected histograms for a range of inputs."""
    values = [1, 1, 2, 1, 2, 3, 1, 2, 3, 4, 1, 2, 3, 4, 5]
    cases = [
        ([1, 1, 1, 2, 2, 3], [0, 3, 2, 1]),
        (values, [0, 5, 4, 3, 2, 1]),
        (values + [0, 0, 0, 0, 0, 0], [6, 5, 4, 3, 2, 1]),
        ([], []),
        ([0, 0, 0], [3]),
        ([5], [0, 0, 0, 0, 0, 1]),
        (np.arange(10000), np.ones(10000)),
    ]
    with self.test_session(use_gpu=True):
      for inp, expected in cases:
        self.assertAllEqual(math_ops.bincount(inp).eval(), expected)

  def test_maxlength(self):
    """The histogram is truncated to `maxlength` bins."""
    cases = [([5], [0, 0, 0]), ([1], [0, 1]), ([], [])]
    with self.test_session(use_gpu=True):
      for inp, expected in cases:
        self.assertAllEqual(
            math_ops.bincount(inp, maxlength=3).eval(), expected)

  def test_random_with_weights(self):
    """Weighted counts agree with np.bincount for int and float weights."""
    num_samples = 10000
    with self.test_session(use_gpu=True):
      np.random.seed(42)
      for dtype in [dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64]:
        values = np.random.randint(0, 1000, num_samples)
        if dtype in (dtypes.int32, dtypes.int64):
          weights = np.random.randint(-100, 100, num_samples)
        else:
          weights = np.random.random(num_samples)
        self.assertAllClose(
            math_ops.bincount(values, weights).eval(),
            np.bincount(values, weights))

  def test_random_without_weights(self):
    """Unweighted counts agree with np.bincount given unit weights."""
    num_samples = 10000
    with self.test_session(use_gpu=True):
      np.random.seed(42)
      for dtype in [np.int32, np.float32]:
        values = np.random.randint(0, 1000, num_samples)
        unit_weights = np.ones(num_samples).astype(dtype)
        self.assertAllClose(
            math_ops.bincount(values, None).eval(),
            np.bincount(values, unit_weights))

  def test_zero_weights(self):
    """All-zero weights produce an all-zero histogram."""
    with self.test_session(use_gpu=True):
      values = np.arange(1000)
      zero_weights = np.zeros(1000)
      self.assertAllEqual(
          math_ops.bincount(values, zero_weights).eval(), np.zeros(1000))

  def test_negative(self):
    """Negative input values are rejected with InvalidArgumentError."""
    # unsorted_segment_sum will only report InvalidArgumentError on CPU
    with self.cached_session():
      with self.assertRaises(errors.InvalidArgumentError):
        math_ops.bincount([1, 2, 3, -1, 6, 8]).eval()
# Run the TensorFlow test cases when this file is executed as a script.
if __name__ == "__main__":
  googletest.main()
| [
"numpy.ones",
"numpy.random.random",
"tensorflow.python.ops.math_ops.bincount",
"numpy.random.randint",
"tensorflow.python.platform.googletest.main",
"numpy.zeros",
"numpy.random.seed",
"numpy.bincount",
"numpy.arange"
] | [((4133, 4150), 'tensorflow.python.platform.googletest.main', 'googletest.main', ([], {}), '()\n', (4148, 4150), False, 'from tensorflow.python.platform import googletest\n'), ((2826, 2844), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (2840, 2844), True, 'import numpy as np\n'), ((3387, 3405), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (3401, 3405), True, 'import numpy as np\n'), ((2396, 2410), 'numpy.ones', 'np.ones', (['(10000)'], {}), '(10000)\n', (2403, 2410), True, 'import numpy as np\n'), ((2940, 2979), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)', 'num_samples'], {}), '(0, 1000, num_samples)\n', (2957, 2979), True, 'import numpy as np\n'), ((3463, 3502), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)', 'num_samples'], {}), '(0, 1000, num_samples)\n', (3480, 3502), True, 'import numpy as np\n'), ((3841, 3855), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (3849, 3855), True, 'import numpy as np\n'), ((3059, 3100), 'numpy.random.randint', 'np.random.randint', (['(-100)', '(100)', 'num_samples'], {}), '(-100, 100, num_samples)\n', (3076, 3100), True, 'import numpy as np\n'), ((3135, 3164), 'numpy.random.random', 'np.random.random', (['num_samples'], {}), '(num_samples)\n', (3151, 3164), True, 'import numpy as np\n'), ((3246, 3271), 'numpy.bincount', 'np.bincount', (['arr', 'weights'], {}), '(arr, weights)\n', (3257, 3271), True, 'import numpy as np\n'), ((3634, 3659), 'numpy.bincount', 'np.bincount', (['arr', 'weights'], {}), '(arr, weights)\n', (3645, 3659), True, 'import numpy as np\n'), ((1247, 1281), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {'minlength': '(5)'}), '([], minlength=5)\n', (1264, 1281), False, 'from tensorflow.python.ops import math_ops\n'), ((1333, 1367), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {'minlength': '(1)'}), '([], minlength=1)\n', (1350, 1367), False, 'from tensorflow.python.ops 
import math_ops\n'), ((1407, 1441), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {'minlength': '(0)'}), '([], minlength=0)\n', (1424, 1441), False, 'from tensorflow.python.ops import math_ops\n'), ((1805, 1842), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[1, 1, 1, 2, 2, 3]'], {}), '([1, 1, 1, 2, 2, 3])\n', (1822, 1842), False, 'from tensorflow.python.ops import math_ops\n'), ((1949, 1971), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['arr'], {}), '(arr)\n', (1966, 1971), False, 'from tensorflow.python.ops import math_ops\n'), ((2058, 2080), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['arr'], {}), '(arr)\n', (2075, 2080), False, 'from tensorflow.python.ops import math_ops\n'), ((2136, 2157), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {}), '([])\n', (2153, 2157), False, 'from tensorflow.python.ops import math_ops\n'), ((2196, 2224), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[0, 0, 0]'], {}), '([0, 0, 0])\n', (2213, 2224), False, 'from tensorflow.python.ops import math_ops\n'), ((2264, 2286), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[5]'], {}), '([5])\n', (2281, 2286), False, 'from tensorflow.python.ops import math_ops\n'), ((2509, 2544), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[5]'], {'maxlength': '(3)'}), '([5], maxlength=3)\n', (2526, 2544), False, 'from tensorflow.python.ops import math_ops\n'), ((2590, 2625), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[1]'], {'maxlength': '(3)'}), '([1], maxlength=3)\n', (2607, 2625), False, 'from tensorflow.python.ops import math_ops\n'), ((2668, 2702), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {'maxlength': '(3)'}), '([], maxlength=3)\n', (2685, 2702), False, 'from tensorflow.python.ops import math_ops\n'), ((3521, 3541), 'numpy.ones', 'np.ones', (['num_samples'], {}), 
'(num_samples)\n', (3528, 3541), True, 'import numpy as np\n'), ((4056, 4094), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[1, 2, 3, -1, 6, 8]'], {}), '([1, 2, 3, -1, 6, 8])\n', (4073, 4094), False, 'from tensorflow.python.ops import math_ops\n'), ((1488, 1540), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {'minlength': '(0)', 'dtype': 'np.float32'}), '([], minlength=0, dtype=np.float32)\n', (1505, 1540), False, 'from tensorflow.python.ops import math_ops\n'), ((1611, 1663), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['[]'], {'minlength': '(3)', 'dtype': 'np.float64'}), '([], minlength=3, dtype=np.float64)\n', (1628, 1663), False, 'from tensorflow.python.ops import math_ops\n'), ((2370, 2386), 'numpy.arange', 'np.arange', (['(10000)'], {}), '(10000)\n', (2379, 2386), True, 'import numpy as np\n'), ((3206, 3237), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['arr', 'weights'], {}), '(arr, weights)\n', (3223, 3237), False, 'from tensorflow.python.ops import math_ops\n'), ((3597, 3625), 'tensorflow.python.ops.math_ops.bincount', 'math_ops.bincount', (['arr', 'None'], {}), '(arr, None)\n', (3614, 3625), False, 'from tensorflow.python.ops import math_ops\n'), ((3790, 3805), 'numpy.arange', 'np.arange', (['(1000)'], {}), '(1000)\n', (3799, 3805), True, 'import numpy as np\n'), ((3807, 3821), 'numpy.zeros', 'np.zeros', (['(1000)'], {}), '(1000)\n', (3815, 3821), True, 'import numpy as np\n')] |
import torch
from torch.utils.data.dataset import Dataset
import numpy as np
import pandas as pd
import cv2
from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss
from sklearn.model_selection import train_test_split
from . import config
class PlantPathology(Dataset):
    """Plant Pathology image dataset.

    Feeds a DataLoader with (optionally augmented) images and, unless in test
    mode, their one-hot labels. Inherits from torch's Dataset.
    """

    def __init__(self, df, label_cols=None, is_test=False, apply_transforms=True, to_tensor=True):
        self.is_test = is_test
        self.apply_transforms = apply_transforms
        self.to_tensor = to_tensor
        # Metadata: one row per image, with an 'image_id' column.
        self.df_metadata = df
        self.image_ids = self.df_metadata['image_id'].values
        if not self.is_test:
            self.label_cols = label_cols
            self.labels = self.df_metadata[self.label_cols].values
            # Per-class weights: log of the negative/positive ratio.
            n_samples = self.labels.shape[0]
            self.label_weights = np.log(n_samples / self.labels.sum(axis=0) - 1)
        # Augmentation pipeline applied in _transform.
        augmentations = [
            Flip(p=0.8),
            ShiftScaleRotate(shift_limit=0.05, scale_limit=0.2, rotate_limit=90, p=1.0),
            RandomBrightnessContrast(p=1.0),
            OneOf([IAASharpen(), IAAEmboss()], p=0.5),
            RandomCrop(1024, 1024, p=1.0),
            Resize(64, 64),
            CLAHE(clip_limit=(1, 4), tile_grid_size=(8, 8), p=1.0),
        ]
        self.transforms = Compose(augmentations)

    def __len__(self):
        """Number of images in the dataset."""
        return len(self.image_ids)

    def __getitem__(self, index):
        """Load, augment and return the sample at *index*."""
        if torch.is_tensor(index):
            index = index.tolist()
        image = cv2.imread(config.get_image_filename(self.image_ids[index]))
        if self.apply_transforms:
            image = self._transform(image)
        if self.to_tensor:
            image = self._to_tensor(image)
        if self.is_test:
            return image
        return image, self.labels[index]

    def _transform(self, image):
        """Apply the augmentation pipeline to a HWC image."""
        augmented = self.transforms(image=image)
        return augmented['image']

    def _to_tensor(self, image):
        """Convert a HWC numpy image to a CHW float torch tensor."""
        chw = image.transpose((2, 0, 1))
        return torch.from_numpy(chw).float()

    def label_from_vect(self, label_vector):
        """Return the class name with the highest score in *label_vector*."""
        best = np.argmax(label_vector)
        return self.label_cols[best]
def stratified_split(df, label_cols, test_size=.2, shuffle=True):
    """Split a dataframe into train/validation sets preserving class distributions.

    Args:
        df: dataframe with one row per sample.
        label_cols: column names used for stratification.
        test_size: fraction of rows assigned to the validation split.
        shuffle: whether to shuffle before splitting. Note scikit-learn
            requires shuffle=True whenever `stratify` is given, so passing
            False raises a ValueError from train_test_split.

    Returns:
        (train_df, test_df) tuple of dataframes.
    """
    # Bug fix: the caller's `shuffle` flag was previously ignored (hard-coded True).
    train, test, _, _ = train_test_split(
        df, df[label_cols], test_size=test_size,
        stratify=df[label_cols], shuffle=shuffle)
    return train, test
def oversample(df, label_cols, factor, balance_classes=True):
    """Duplicate samples per class to enlarge (and optionally balance) a dataframe.

    Args:
        df: dataframe with one binary indicator column per class.
        label_cols: names of the class indicator columns.
        factor: base number of extra copies appended per class.
        balance_classes: if True, additionally scale each class's copies by the
            (rounded) ratio of the most frequent class rate to its own rate, so
            rare classes receive more duplicates.

    Returns:
        A new dataframe with duplicated rows and a reset index.
    """
    if balance_classes:
        # Per-class positive rate; replication multiplier is max_rate / rate.
        # NOTE(review): int8 would overflow for ratios > 127 — kept for
        # backward compatibility, confirm class imbalance stays below that.
        class_balance = df[label_cols].sum(axis=0) / df[label_cols].shape[0]
        class_balance = np.round(class_balance.max() / class_balance).astype('int8').to_dict()
    else:
        class_balance = {k: 1 for k in label_cols}
    for k, v in class_balance.items():
        # DataFrame.append was removed in pandas 2.0; pd.concat is the
        # supported replacement. df grows inside the loop, so later classes
        # also duplicate rows appended for earlier ones (original behavior).
        df = pd.concat([df] + [df[df[k] == 1]] * (factor * v), ignore_index=True)
    return df
"albumentations.ShiftScaleRotate",
"sklearn.model_selection.train_test_split",
"albumentations.RandomBrightnessContrast",
"albumentations.IAAEmboss",
"albumentations.Flip",
"numpy.argmax",
"albumentations.RandomCrop",
"torch.from_numpy",
"torch.is_tensor",
"albumentations.Resize",
"albumentation... | [((2951, 3052), 'sklearn.model_selection.train_test_split', 'train_test_split', (['df', 'df[label_cols]'], {'test_size': 'test_size', 'stratify': 'df[label_cols]', 'shuffle': '(True)'}), '(df, df[label_cols], test_size=test_size, stratify=df[\n label_cols], shuffle=True)\n', (2967, 3052), False, 'from sklearn.model_selection import train_test_split\n'), ((1912, 1934), 'torch.is_tensor', 'torch.is_tensor', (['index'], {}), '(index)\n', (1927, 1934), False, 'import torch\n'), ((2066, 2088), 'cv2.imread', 'cv2.imread', (['image_path'], {}), '(image_path)\n', (2076, 2088), False, 'import cv2\n'), ((2731, 2754), 'numpy.argmax', 'np.argmax', (['label_vector'], {}), '(label_vector)\n', (2740, 2754), True, 'import numpy as np\n'), ((1293, 1304), 'albumentations.Flip', 'Flip', ([], {'p': '(0.8)'}), '(p=0.8)\n', (1297, 1304), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((1318, 1393), 'albumentations.ShiftScaleRotate', 'ShiftScaleRotate', ([], {'shift_limit': '(0.05)', 'scale_limit': '(0.2)', 'rotate_limit': '(90)', 'p': '(1.0)'}), '(shift_limit=0.05, scale_limit=0.2, rotate_limit=90, p=1.0)\n', (1334, 1393), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((1493, 1524), 'albumentations.RandomBrightnessContrast', 'RandomBrightnessContrast', ([], {'p': '(1.0)'}), '(p=1.0)\n', (1517, 1524), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((1639, 1668), 'albumentations.RandomCrop', 'RandomCrop', (['(1024)', '(1024)'], {'p': '(1.0)'}), '(1024, 1024, p=1.0)\n', 
(1649, 1668), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((1681, 1695), 'albumentations.Resize', 'Resize', (['(64)', '(64)'], {}), '(64, 64)\n', (1687, 1695), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((1709, 1763), 'albumentations.CLAHE', 'CLAHE', ([], {'clip_limit': '(1, 4)', 'tile_grid_size': '(8, 8)', 'p': '(1.0)'}), '(clip_limit=(1, 4), tile_grid_size=(8, 8), p=1.0)\n', (1714, 1763), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((2588, 2611), 'torch.from_numpy', 'torch.from_numpy', (['image'], {}), '(image)\n', (2604, 2611), False, 'import torch\n'), ((1561, 1573), 'albumentations.IAASharpen', 'IAASharpen', ([], {}), '()\n', (1571, 1573), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n'), ((1591, 1602), 'albumentations.IAAEmboss', 'IAAEmboss', ([], {}), '()\n', (1600, 1602), False, 'from albumentations import Compose, Flip, RandomScale, ShiftScaleRotate, RandomBrightnessContrast, Rotate, RandomCrop, CenterCrop, Resize, Blur, CLAHE, Equalize, Normalize, OneOf, IAASharpen, IAAEmboss\n')] |
# Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
from dace.transformation.dataflow import GPUTransformMap
import numpy as np
import pytest
# Symbols
N = dace.symbol('N')
M = dace.symbol('M')
K = dace.symbol('K')
L = dace.symbol('L')
X = dace.symbol('X')
Y = dace.symbol('Y')
Z = dace.symbol('Z')
W = dace.symbol('W')
U = dace.symbol('U')
@dace.program
def highdim(A: dace.uint64[N, M, K, L, X, Y, Z, W, U], B: dace.uint64[N, M, K,
                                                                      L]):
    # Outer 4-D map over a clipped subset of the first four dimensions.
    @dace.mapscope
    def kernel(i: _[5:N - 5], j: _[0:M], k: _[7:K - 1], l: _[0:L]):
        # Inner 5-D map (clipped in c and d); sum-reduces A into B[i, j, k, l].
        @dace.map
        def block(a: _[0:X], b: _[0:Y], c: _[1:Z], d: _[2:W - 2], e: _[0:U]):
            input << A[i, j, k, l, a, b, c, d, e]
            # Write-conflict resolution: accumulate with addition.
            output >> B(1, lambda a, b: a + b)[i, j, k, l]
            output = input
def makendrange(*args):
    """Pair consecutive (begin, end) arguments into DaCe-style inclusive
    range tuples (begin, end - 1, step=1)."""
    begins = args[0::2]
    ends = args[1::2]
    return [(b, e - 1, 1) for b, e in zip(begins, ends)]
def _test(sdfg):
    """Run *sdfg* on random uint64 data and compare against a pure-Python
    reference implementation of the same clipped high-dimensional reduction."""
    # 4D kernel with 5D block
    N = 12
    M = 3
    K = 14
    L = 15
    X = 1
    Y = 2
    Z = 3
    W = 4
    U = 5
    dims = tuple(s for s in (N, M, K, L, X, Y, Z, W, U))
    outdims = tuple(s for s in (N, M, K, L))
    print('High-dimensional GPU kernel test', dims)
    A = dace.ndarray((N, M, K, L, X, Y, Z, W, U), dtype=dace.uint64)
    B = dace.ndarray((N, M, K, L), dtype=dace.uint64)
    A[:] = np.random.randint(10, size=dims).astype(np.uint64)
    B[:] = np.zeros(outdims, dtype=np.uint64)
    B_regression = np.zeros(outdims, dtype=np.uint64)
    # Equivalent python code
    # Ranges mirror the map bounds in `highdim` (outer 4-D, inner 5-D).
    for i, j, k, l in dace.ndrange(makendrange(5, N - 5, 0, M, 7, K - 1, 0, L)):
        for a, b, c, d, e in dace.ndrange(
                makendrange(0, X, 0, Y, 1, Z, 2, W - 2, 0, U)):
            B_regression[i, j, k, l] += A[i, j, k, l, a, b, c, d, e]
    sdfg(A=A, B=B, N=N, M=M, K=K, L=L, X=X, Y=Y, Z=Z, W=W, U=U)
    # Normalized L2 difference between SDFG output and the reference.
    diff = np.linalg.norm(B_regression - B) / (N * M * K * L)
    print('Difference:', diff)
    assert diff <= 1e-5
def test_cpu():
    """Validate the high-dimensional program on the default (CPU) code path."""
    sdfg = highdim.to_sdfg()
    _test(sdfg)
@pytest.mark.gpu
def test_gpu():
    """Validate the program after mapping the outer map onto the GPU
    (with a full copy of the accessed arrays)."""
    sdfg = highdim.to_sdfg()
    applied = sdfg.apply_transformations(GPUTransformMap,
                                         options=dict(fullcopy=True))
    assert applied == 1
    _test(sdfg)
if __name__ == "__main__":
test_cpu()
| [
"dace.symbol",
"numpy.zeros",
"numpy.random.randint",
"numpy.linalg.norm",
"dace.ndarray"
] | [((193, 209), 'dace.symbol', 'dace.symbol', (['"""N"""'], {}), "('N')\n", (204, 209), False, 'import dace\n'), ((214, 230), 'dace.symbol', 'dace.symbol', (['"""M"""'], {}), "('M')\n", (225, 230), False, 'import dace\n'), ((235, 251), 'dace.symbol', 'dace.symbol', (['"""K"""'], {}), "('K')\n", (246, 251), False, 'import dace\n'), ((256, 272), 'dace.symbol', 'dace.symbol', (['"""L"""'], {}), "('L')\n", (267, 272), False, 'import dace\n'), ((278, 294), 'dace.symbol', 'dace.symbol', (['"""X"""'], {}), "('X')\n", (289, 294), False, 'import dace\n'), ((299, 315), 'dace.symbol', 'dace.symbol', (['"""Y"""'], {}), "('Y')\n", (310, 315), False, 'import dace\n'), ((320, 336), 'dace.symbol', 'dace.symbol', (['"""Z"""'], {}), "('Z')\n", (331, 336), False, 'import dace\n'), ((341, 357), 'dace.symbol', 'dace.symbol', (['"""W"""'], {}), "('W')\n", (352, 357), False, 'import dace\n'), ((362, 378), 'dace.symbol', 'dace.symbol', (['"""U"""'], {}), "('U')\n", (373, 378), False, 'import dace\n'), ((1323, 1383), 'dace.ndarray', 'dace.ndarray', (['(N, M, K, L, X, Y, Z, W, U)'], {'dtype': 'dace.uint64'}), '((N, M, K, L, X, Y, Z, W, U), dtype=dace.uint64)\n', (1335, 1383), False, 'import dace\n'), ((1392, 1437), 'dace.ndarray', 'dace.ndarray', (['(N, M, K, L)'], {'dtype': 'dace.uint64'}), '((N, M, K, L), dtype=dace.uint64)\n', (1404, 1437), False, 'import dace\n'), ((1511, 1545), 'numpy.zeros', 'np.zeros', (['outdims'], {'dtype': 'np.uint64'}), '(outdims, dtype=np.uint64)\n', (1519, 1545), True, 'import numpy as np\n'), ((1565, 1599), 'numpy.zeros', 'np.zeros', (['outdims'], {'dtype': 'np.uint64'}), '(outdims, dtype=np.uint64)\n', (1573, 1599), True, 'import numpy as np\n'), ((1964, 1996), 'numpy.linalg.norm', 'np.linalg.norm', (['(B_regression - B)'], {}), '(B_regression - B)\n', (1978, 1996), True, 'import numpy as np\n'), ((1449, 1481), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': 'dims'}), '(10, size=dims)\n', (1466, 1481), True, 'import numpy as np\n')] |
import numpy as np
from numpy import log as ln
from numpy import log10 as log
from numpy import exp
from numba import jit
@jit(nopython=True)
def model_LogicGate_OR_Double_Delay_Delay_ResCompete(y, t, params):
    """ODE right-hand side for an OR logic gate with double delay and
    resource competition.

    y holds the ten states [Inde1, Indi1, Inde2, Indi2, mRNA1, Pep1,
    mRNA2, Pep2, mRNA3, Pep3]; t is unused (autonomous system); params
    holds the twelve constants unpacked below. Returns the derivative
    vector in the same order as y.
    """
    # --- state variables ---
    Inde1 = y[0]
    Indi1 = y[1]
    Inde2 = y[2]
    Indi2 = y[3]
    mRNA1 = y[4]
    Pep1 = y[5]
    mRNA2 = y[6]
    Pep2 = y[7]
    mRNA3 = y[8]
    Pep3 = y[9]
    # --- kinetic parameters ---
    syn_mRNA1 = params[0]
    syn_mRNA2 = params[1]
    syn_mRNA3 = params[2]
    deg_mRNA = params[3]
    syn_Pep = params[4]
    deg_Pep = params[5]
    Pepmax = params[6]
    Km1 = params[7]
    Km2 = params[8]
    Ratio = params[9]
    state1 = params[10]
    state2 = params[11]
    # Saturating (Michaelis-Menten-like) inducer internalisation.
    uptake1 = (Inde1 / (Inde1 + Km1)) * Inde1
    uptake2 = (Inde2 / (Inde2 + Km2)) * Inde2
    dInde1 = -uptake1
    dIndi1 = uptake1
    dInde2 = -uptake2
    dIndi2 = uptake2
    # Input transcription units, each gated by its binary state flag.
    dmRNA1 = syn_mRNA1 * Indi1 * state1 - deg_mRNA * mRNA1
    dPep1 = syn_Pep * mRNA1 - deg_Pep * Pep1
    dmRNA2 = syn_mRNA2 * Indi2 * state2 - deg_mRNA * mRNA2
    dPep2 = syn_Pep * mRNA2 - deg_Pep * Pep2
    # Output unit driven by pooled peptide, translation reduced by
    # resource competition when both states are active.
    dmRNA3 = syn_mRNA3 * ((Pep1 + Pep2) / Pepmax) - deg_mRNA * mRNA3
    dPep3 = syn_Pep * (1 - state1 * state2 * Ratio) * mRNA3 - deg_Pep * Pep3
    return np.array([dInde1, dIndi1, dInde2, dIndi2,
                     dmRNA1, dPep1, dmRNA2, dPep2, dmRNA3, dPep3])
"numpy.array",
"numba.jit"
] | [((126, 144), 'numba.jit', 'jit', ([], {'nopython': '(True)'}), '(nopython=True)\n', (129, 144), False, 'from numba import jit\n'), ((1114, 1205), 'numpy.array', 'np.array', (['[dInde1, dIndi1, dInde2, dIndi2, dmRNA1, dPep1, dmRNA2, dPep2, dmRNA3, dPep3]'], {}), '([dInde1, dIndi1, dInde2, dIndi2, dmRNA1, dPep1, dmRNA2, dPep2,\n dmRNA3, dPep3])\n', (1122, 1205), True, 'import numpy as np\n')] |
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module test add op.
"""
import unittest
from multiprocessing import Manager
import numpy as np
import paddle.fluid as fluid
import paddle_fl.mpc as pfl_mpc
import test_op_base
from paddle_fl.mpc.data_utils.data_utils import get_datautils
aby3 = get_datautils('aby3')
class TestOpPool2d(test_op_base.TestOpBase):
    """Multi-party test of the MPC pool2d layer (3-party ABY3 setting)."""

    def pool2d(self, **kwargs):
        """
        Run mpc pool2d on this party's share of the input.

        :param kwargs: 'role' (party id), 'data_1' (shares for all three
                       parties), 'return_results' (shared list collecting
                       each party's output share).
        :return: None; this party's output share is appended to
                 return_results.
        """
        role = kwargs['role']
        d_1 = kwargs['data_1'][role]
        return_results = kwargs['return_results']

        pfl_mpc.init("aby3", role, "localhost", self.server, int(self.port))
        x = pfl_mpc.data(name='x', shape=[1, 1, 4, 6], dtype='int64')
        pool_out = pfl_mpc.layers.pool2d(input=x, pool_size=2, pool_stride=2)

        exe = fluid.Executor(place=fluid.CPUPlace())
        #exe.run(fluid.default_startup_program())
        results = exe.run(feed={'x': d_1}, fetch_list=[pool_out])

        # Output stays secret-shared, hence the leading share dim of 2.
        self.assertEqual(results[0].shape, (2, 1, 1, 2, 3))
        return_results.append(results[0])

    def test_pool2d(self):
        """Share the plaintext, run pool2d on 3 parties, reconstruct and
        compare with the expected 2x2/stride-2 max-pool result."""
        data_1 = np.array(
            [[[[1, 2, 3, 4, 0, 100],
               [5, 6, 7, 8, 0, 100],
               [9, 10, 11, 12, 0, 200],
               [13, 14, 15, 16, 0, 200]]]]).astype('float32')
        expected_out = np.array(
            [[[[6, 8, 100],
               [14, 16, 200]]]]).astype('float32')
        # print("input data_1: {} \n".format(data_1))
        data_1_shares = aby3.make_shares(data_1)
        data_1_all3shares = np.array([aby3.get_shares(data_1_shares, i) for i in range(3)])

        return_results = Manager().list()
        ret = self.multi_party_run(target=self.pool2d,
                                   data_1=data_1_all3shares,
                                   return_results=return_results)
        self.assertEqual(ret[0], True)

        revealed = aby3.reconstruct(np.array(return_results))
        #print("revealed: {} \n".format(revealed))
        #print("expected: {} \n".format(expected_out))
        # Fixed-point MPC arithmetic is approximate; compare with tolerance.
        self.assertTrue(np.allclose(revealed, expected_out, atol=1e-2))
if __name__ == '__main__':
    # Discover and run the unittest test cases defined above.
    unittest.main()
| [
"numpy.allclose",
"paddle_fl.mpc.data_utils.data_utils.get_datautils",
"paddle.fluid.CPUPlace",
"unittest.main",
"numpy.array",
"paddle_fl.mpc.layers.pool2d",
"paddle_fl.mpc.data",
"multiprocessing.Manager"
] | [((870, 891), 'paddle_fl.mpc.data_utils.data_utils.get_datautils', 'get_datautils', (['"""aby3"""'], {}), "('aby3')\n", (883, 891), False, 'from paddle_fl.mpc.data_utils.data_utils import get_datautils\n'), ((2780, 2795), 'unittest.main', 'unittest.main', ([], {}), '()\n', (2793, 2795), False, 'import unittest\n'), ((1289, 1346), 'paddle_fl.mpc.data', 'pfl_mpc.data', ([], {'name': '"""x"""', 'shape': '[1, 1, 4, 6]', 'dtype': '"""int64"""'}), "(name='x', shape=[1, 1, 4, 6], dtype='int64')\n", (1301, 1346), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((1367, 1425), 'paddle_fl.mpc.layers.pool2d', 'pfl_mpc.layers.pool2d', ([], {'input': 'x', 'pool_size': '(2)', 'pool_stride': '(2)'}), '(input=x, pool_size=2, pool_stride=2)\n', (1388, 1425), True, 'import paddle_fl.mpc as pfl_mpc\n'), ((2543, 2567), 'numpy.array', 'np.array', (['return_results'], {}), '(return_results)\n', (2551, 2567), True, 'import numpy as np\n'), ((2699, 2745), 'numpy.allclose', 'np.allclose', (['revealed', 'expected_out'], {'atol': '(0.01)'}), '(revealed, expected_out, atol=0.01)\n', (2710, 2745), True, 'import numpy as np\n'), ((1462, 1478), 'paddle.fluid.CPUPlace', 'fluid.CPUPlace', ([], {}), '()\n', (1476, 1478), True, 'import paddle.fluid as fluid\n'), ((1746, 1860), 'numpy.array', 'np.array', (['[[[[1, 2, 3, 4, 0, 100], [5, 6, 7, 8, 0, 100], [9, 10, 11, 12, 0, 200], [13,\n 14, 15, 16, 0, 200]]]]'], {}), '([[[[1, 2, 3, 4, 0, 100], [5, 6, 7, 8, 0, 100], [9, 10, 11, 12, 0, \n 200], [13, 14, 15, 16, 0, 200]]]])\n', (1754, 1860), True, 'import numpy as np\n'), ((1956, 1998), 'numpy.array', 'np.array', (['[[[[6, 8, 100], [14, 16, 200]]]]'], {}), '([[[[6, 8, 100], [14, 16, 200]]]])\n', (1964, 1998), True, 'import numpy as np\n'), ((2268, 2277), 'multiprocessing.Manager', 'Manager', ([], {}), '()\n', (2275, 2277), False, 'from multiprocessing import Manager\n')] |
import argparse
import os
import sys
from glob import glob
from os.path import basename, join, splitext
import librosa
import numpy as np
import pysinsy
import soundfile as sf
from nnmnkwii.io import hts
from nnsvs.io.hts import get_note_indices
def _is_silence(label):
is_full_context = "@" in label
if is_full_context:
is_silence = "-sil" in label or "-pau" in label
else:
is_silence = label == "sil" or label == "pau"
return is_silence
def remove_sil_and_pau(lab):
    """Return a copy of the HTS label file with silence/pause entries dropped."""
    filtered = hts.HTSLabelFile()
    for entry in lab:
        context = entry[-1]
        if "-sil" in context or "-pau" in context:
            continue
        filtered.append(entry, strict=False)
    return filtered
def get_parser():
    """Build the command-line parser for the PJS data-preparation script."""
    parser = argparse.ArgumentParser(
        description="Data preparation for PJS",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    # Two required positional paths, then an optional normalization flag.
    for arg_name, arg_help in (("pjs_root", "PJS song dir"),
                               ("out_dir", "Output directory")):
        parser.add_argument(arg_name, type=str, help=arg_help)
    parser.add_argument("--gain-normalize", action="store_true")
    return parser
# This module runs as a script: everything below executes at import time.
args = get_parser().parse_args(sys.argv[1:])
out_dir = args.out_dir
gain_normalize = args.gain_normalize
# Time-lag constraints to filter outliers
# (lags outside these frame ranges are discarded further below).
timelag_allowed_range = (-20, 19)
timelag_allowed_range_rest = (-40, 39)
offset_correction_threshold = 0.01
# Make aligned full context labels
full_align_dir = join(out_dir, "label_phone_align")
full_score_dir = join(out_dir, "label_phone_score")
for d in [full_align_dir, full_score_dir]:
    os.makedirs(d, exist_ok=True)
sinsy = pysinsy.sinsy.Sinsy()
# "j": Japanese dictionary for the Sinsy synthesizer front-end.
assert sinsy.setLanguages("j", pysinsy.get_default_dic_dir())
# Each song directory provides a mono label plus a matching MusicXML score;
# sorting keeps the two lists paired by filename.
mono_lab_files = sorted(glob(join(args.pjs_root, "**/*.lab")))
muxicxml_files = sorted(glob(join(args.pjs_root, "**/*.musicxml")))
assert len(mono_lab_files) == len(muxicxml_files)
# Generate aligned and score-timed full-context labels for every song.
for mono_path, xml_path in zip(mono_lab_files, muxicxml_files):
    align_mono_lab = hts.load(mono_path)
    name = basename(mono_path)
    assert sinsy.loadScoreFromMusicXML(xml_path)
    # check if sinsy's phoneme output is same as the provided alignment format
    sinsy_labels = sinsy.createLabelData(True, 1, 1).getData()
    sinsy_mono_lab = hts.HTSLabelFile()
    for label in sinsy_labels:
        sinsy_mono_lab.append(label.split(), strict=False)
    assert len(align_mono_lab) == len(sinsy_mono_lab)
    assert (
        np.asarray(align_mono_lab.contexts) == np.asarray(sinsy_mono_lab.contexts)
    ).all()
    # rounding
    # Snap phone boundaries to a 50000-unit grid (5 ms assuming the HTK
    # 100 ns time base -- TODO confirm) and repair zero-length phones.
    has_too_short_ph = False
    for idx in range(len(align_mono_lab)):
        b, e = align_mono_lab.start_times[idx], align_mono_lab.end_times[idx]
        bb, ee = round(b / 50000) * 50000, round(e / 50000) * 50000
        # TODO: better way
        if bb == ee:
            # ensure minimum frame length 1
            align_mono_lab.end_times[idx] = align_mono_lab.start_times[idx] + 50000
            align_mono_lab.start_times[idx + 1] = align_mono_lab.end_times[idx]
            print(align_mono_lab[idx - 1 : idx + 2])
            has_too_short_ph = True
    if has_too_short_ph:
        # Skip songs with unrecoverable alignments; just reset sinsy state.
        sinsy.clearScore()
    else:
        # gen full-context
        sinsy_labels = sinsy.createLabelData(False, 1, 1).getData()
        align_full_lab = hts.HTSLabelFile()
        score_full_lab = hts.HTSLabelFile()
        for idx, label in enumerate(sinsy_labels):
            b, e = align_mono_lab.start_times[idx], align_mono_lab.end_times[idx]
            try:
                align_full_lab.append((b, e, label.split()[-1]), strict=True)
            except BaseException:
                # TODO
                import ipdb
                ipdb.set_trace()
            # Score-side timing comes from sinsy, rounded to the same grid.
            b, e, c = label.split()
            b, e = round(int(b) / 50000) * 50000, round(int(e) / 50000) * 50000
            assert b != e
            score_full_lab.append((b, e, c), strict=False)
        with open(join(full_score_dir, name), "w") as of:
            of.write(str(score_full_lab))
        with open(join(full_align_dir, name), "w") as of:
            of.write(str(align_full_lab))
        sinsy.clearScore()
# Prepare data for time-lag models
dst_dir = join(out_dir, "timelag")
lab_align_dst_dir = join(dst_dir, "label_phone_align")
lab_score_dst_dir = join(dst_dir, "label_phone_score")
for d in [lab_align_dst_dir, lab_score_dst_dir]:
    os.makedirs(d, exist_ok=True)
print("Prepare data for time-lag models")
full_lab_align_files = sorted(glob(join(full_align_dir, "*.lab")))
full_lab_score_files = sorted(glob(join(full_score_dir, "*.lab")))
for lab_align_path, lab_score_path in zip(full_lab_align_files, full_lab_score_files):
    name = basename(lab_align_path)
    lab_align = hts.load(lab_align_path)
    lab_score = hts.load(lab_score_path)
    # this may harm for computing offset
    lab_align = remove_sil_and_pau(lab_align)
    lab_score = remove_sil_and_pau(lab_score)
    # Extract note onsets and let's compute a offset
    note_indices = get_note_indices(lab_score)
    onset_align = np.asarray(lab_align[note_indices].start_times)
    onset_score = np.asarray(lab_score[note_indices].start_times)
    # Mean onset difference, snapped to the 50000-unit grid.
    global_offset = (onset_align - onset_score).mean()
    global_offset = int(round(global_offset / 50000) * 50000)
    # Apply offset correction only when there is a big gap
    # (1e-7 converts time units to seconds, assuming 100 ns units -- TODO confirm).
    apply_offset_correction = np.abs(global_offset * 1e-7) > offset_correction_threshold
    if apply_offset_correction:
        print(f"{name}: Global offset (in sec): {global_offset * 1e-7}")
        lab_score.start_times = list(np.asarray(lab_score.start_times) + global_offset)
        lab_score.end_times = list(np.asarray(lab_score.end_times) + global_offset)
        onset_score += global_offset
    # Exclude large diff parts (probably a bug of musicxml or alignment though)
    valid_note_indices = []
    for idx, (a, b) in enumerate(zip(onset_align, onset_score)):
        note_idx = note_indices[idx]
        lag = np.abs(a - b) / 50000
        # Rests get a looser allowed lag range than voiced notes.
        if _is_silence(lab_score.contexts[note_idx]):
            if (
                lag >= timelag_allowed_range_rest[0]
                and lag <= timelag_allowed_range_rest[1]
            ):
                valid_note_indices.append(note_idx)
        else:
            if lag >= timelag_allowed_range[0] and lag <= timelag_allowed_range[1]:
                valid_note_indices.append(note_idx)
    if len(valid_note_indices) < len(note_indices):
        D = len(note_indices) - len(valid_note_indices)
        print(f"{name}: {D}/{len(note_indices)} time-lags are excluded.")
    # Note onsets as labels
    lab_align = lab_align[valid_note_indices]
    lab_score = lab_score[valid_note_indices]
    # Save lab files
    lab_align_dst_path = join(lab_align_dst_dir, name)
    with open(lab_align_dst_path, "w") as of:
        of.write(str(lab_align))
    lab_score_dst_path = join(lab_score_dst_dir, name)
    with open(lab_score_dst_path, "w") as of:
        of.write(str(lab_score))
# Prepare data for duration models
# (aligned labels only; durations are read from the alignment itself).
dst_dir = join(out_dir, "duration")
lab_align_dst_dir = join(dst_dir, "label_phone_align")
for d in [lab_align_dst_dir]:
    os.makedirs(d, exist_ok=True)
print("Prepare data for duration models")
full_lab_align_files = sorted(glob(join(full_align_dir, "*.lab")))
for lab_align_path in full_lab_align_files:
    name = basename(lab_align_path)
    lab_align = hts.load(lab_align_path)
    # Save lab file
    lab_align_dst_path = join(lab_align_dst_dir, name)
    with open(lab_align_dst_path, "w") as of:
        of.write(str(lab_align))
# Prepare data for acoustic models
# (waveforms plus both aligned and score-timed labels).
dst_dir = join(out_dir, "acoustic")
wav_dst_dir = join(dst_dir, "wav")
lab_align_dst_dir = join(dst_dir, "label_phone_align")
lab_score_dst_dir = join(dst_dir, "label_phone_score")
for d in [wav_dst_dir, lab_align_dst_dir, lab_score_dst_dir]:
    os.makedirs(d, exist_ok=True)
print("Prepare data for acoustic models")
full_lab_align_files = sorted(glob(join(full_align_dir, "*.lab")))
full_lab_score_files = sorted(glob(join(full_score_dir, "*.lab")))
for lab_align_path, lab_score_path in zip(full_lab_align_files, full_lab_score_files):
    name = splitext(basename(lab_align_path))[0]
    wav_path = join(args.pjs_root, name, f"{name}_song.wav")
    assert wav_path
    # sr, wave = wavfile.read(wav_path)
    # Resample to 48 kHz on load.
    wav, sr = librosa.load(wav_path, sr=48000)
    if gain_normalize:
        # Peak-normalize with a small headroom.
        wav = wav / wav.max() * 0.99
    lab_align = hts.load(lab_align_path)
    lab_score = hts.load(lab_score_path)
    # Save audio
    wav_dst_path = join(wav_dst_dir, name + ".wav")
    # TODO: consider explicit subtype
    sf.write(wav_dst_path, wav, sr)
    # Save label
    lab_align_dst_path = join(lab_align_dst_dir, name + ".lab")
    with open(lab_align_dst_path, "w") as of:
        of.write(str(lab_align))
    lab_score_dst_path = join(lab_score_dst_dir, name + ".lab")
    with open(lab_score_dst_path, "w") as of:
        of.write(str(lab_score))
sys.exit(0)
| [
"numpy.abs",
"pysinsy.sinsy.Sinsy",
"argparse.ArgumentParser",
"os.makedirs",
"nnmnkwii.io.hts.load",
"ipdb.set_trace",
"os.path.join",
"numpy.asarray",
"nnmnkwii.io.hts.HTSLabelFile",
"soundfile.write",
"nnsvs.io.hts.get_note_indices",
"os.path.basename",
"sys.exit",
"pysinsy.get_default_... | [((1400, 1434), 'os.path.join', 'join', (['out_dir', '"""label_phone_align"""'], {}), "(out_dir, 'label_phone_align')\n", (1404, 1434), False, 'from os.path import basename, join, splitext\n'), ((1452, 1486), 'os.path.join', 'join', (['out_dir', '"""label_phone_score"""'], {}), "(out_dir, 'label_phone_score')\n", (1456, 1486), False, 'from os.path import basename, join, splitext\n'), ((1573, 1594), 'pysinsy.sinsy.Sinsy', 'pysinsy.sinsy.Sinsy', ([], {}), '()\n', (1592, 1594), False, 'import pysinsy\n'), ((4109, 4133), 'os.path.join', 'join', (['out_dir', '"""timelag"""'], {}), "(out_dir, 'timelag')\n", (4113, 4133), False, 'from os.path import basename, join, splitext\n'), ((4154, 4188), 'os.path.join', 'join', (['dst_dir', '"""label_phone_align"""'], {}), "(dst_dir, 'label_phone_align')\n", (4158, 4188), False, 'from os.path import basename, join, splitext\n'), ((4209, 4243), 'os.path.join', 'join', (['dst_dir', '"""label_phone_score"""'], {}), "(dst_dir, 'label_phone_score')\n", (4213, 4243), False, 'from os.path import basename, join, splitext\n'), ((6947, 6972), 'os.path.join', 'join', (['out_dir', '"""duration"""'], {}), "(out_dir, 'duration')\n", (6951, 6972), False, 'from os.path import basename, join, splitext\n'), ((6993, 7027), 'os.path.join', 'join', (['dst_dir', '"""label_phone_align"""'], {}), "(dst_dir, 'label_phone_align')\n", (6997, 7027), False, 'from os.path import basename, join, splitext\n'), ((7528, 7553), 'os.path.join', 'join', (['out_dir', '"""acoustic"""'], {}), "(out_dir, 'acoustic')\n", (7532, 7553), False, 'from os.path import basename, join, splitext\n'), ((7568, 7588), 'os.path.join', 'join', (['dst_dir', '"""wav"""'], {}), "(dst_dir, 'wav')\n", (7572, 7588), False, 'from os.path import basename, join, splitext\n'), ((7609, 7643), 'os.path.join', 'join', (['dst_dir', '"""label_phone_align"""'], {}), "(dst_dir, 'label_phone_align')\n", (7613, 7643), False, 'from os.path import basename, join, splitext\n'), 
((7664, 7698), 'os.path.join', 'join', (['dst_dir', '"""label_phone_score"""'], {}), "(dst_dir, 'label_phone_score')\n", (7668, 7698), False, 'from os.path import basename, join, splitext\n'), ((8872, 8883), 'sys.exit', 'sys.exit', (['(0)'], {}), '(0)\n', (8880, 8883), False, 'import sys\n'), ((518, 536), 'nnmnkwii.io.hts.HTSLabelFile', 'hts.HTSLabelFile', ([], {}), '()\n', (534, 536), False, 'from nnmnkwii.io import hts\n'), ((722, 845), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Data preparation for PJS"""', 'formatter_class': 'argparse.ArgumentDefaultsHelpFormatter'}), "(description='Data preparation for PJS',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n", (745, 845), False, 'import argparse\n'), ((1534, 1563), 'os.makedirs', 'os.makedirs', (['d'], {'exist_ok': '(True)'}), '(d, exist_ok=True)\n', (1545, 1563), False, 'import os\n'), ((1626, 1655), 'pysinsy.get_default_dic_dir', 'pysinsy.get_default_dic_dir', ([], {}), '()\n', (1653, 1655), False, 'import pysinsy\n'), ((1924, 1943), 'nnmnkwii.io.hts.load', 'hts.load', (['mono_path'], {}), '(mono_path)\n', (1932, 1943), False, 'from nnmnkwii.io import hts\n'), ((1955, 1974), 'os.path.basename', 'basename', (['mono_path'], {}), '(mono_path)\n', (1963, 1974), False, 'from os.path import basename, join, splitext\n'), ((2188, 2206), 'nnmnkwii.io.hts.HTSLabelFile', 'hts.HTSLabelFile', ([], {}), '()\n', (2204, 2206), False, 'from nnmnkwii.io import hts\n'), ((4298, 4327), 'os.makedirs', 'os.makedirs', (['d'], {'exist_ok': '(True)'}), '(d, exist_ok=True)\n', (4309, 4327), False, 'import os\n'), ((4603, 4627), 'os.path.basename', 'basename', (['lab_align_path'], {}), '(lab_align_path)\n', (4611, 4627), False, 'from os.path import basename, join, splitext\n'), ((4645, 4669), 'nnmnkwii.io.hts.load', 'hts.load', (['lab_align_path'], {}), '(lab_align_path)\n', (4653, 4669), False, 'from nnmnkwii.io import hts\n'), ((4686, 4710), 'nnmnkwii.io.hts.load', 'hts.load', 
(['lab_score_path'], {}), '(lab_score_path)\n', (4694, 4710), False, 'from nnmnkwii.io import hts\n'), ((4918, 4945), 'nnsvs.io.hts.get_note_indices', 'get_note_indices', (['lab_score'], {}), '(lab_score)\n', (4934, 4945), False, 'from nnsvs.io.hts import get_note_indices\n'), ((4965, 5012), 'numpy.asarray', 'np.asarray', (['lab_align[note_indices].start_times'], {}), '(lab_align[note_indices].start_times)\n', (4975, 5012), True, 'import numpy as np\n'), ((5031, 5078), 'numpy.asarray', 'np.asarray', (['lab_score[note_indices].start_times'], {}), '(lab_score[note_indices].start_times)\n', (5041, 5078), True, 'import numpy as np\n'), ((6656, 6685), 'os.path.join', 'join', (['lab_align_dst_dir', 'name'], {}), '(lab_align_dst_dir, name)\n', (6660, 6685), False, 'from os.path import basename, join, splitext\n'), ((6791, 6820), 'os.path.join', 'join', (['lab_score_dst_dir', 'name'], {}), '(lab_score_dst_dir, name)\n', (6795, 6820), False, 'from os.path import basename, join, splitext\n'), ((7063, 7092), 'os.makedirs', 'os.makedirs', (['d'], {'exist_ok': '(True)'}), '(d, exist_ok=True)\n', (7074, 7092), False, 'import os\n'), ((7258, 7282), 'os.path.basename', 'basename', (['lab_align_path'], {}), '(lab_align_path)\n', (7266, 7282), False, 'from os.path import basename, join, splitext\n'), ((7300, 7324), 'nnmnkwii.io.hts.load', 'hts.load', (['lab_align_path'], {}), '(lab_align_path)\n', (7308, 7324), False, 'from nnmnkwii.io import hts\n'), ((7371, 7400), 'os.path.join', 'join', (['lab_align_dst_dir', 'name'], {}), '(lab_align_dst_dir, name)\n', (7375, 7400), False, 'from os.path import basename, join, splitext\n'), ((7766, 7795), 'os.makedirs', 'os.makedirs', (['d'], {'exist_ok': '(True)'}), '(d, exist_ok=True)\n', (7777, 7795), False, 'import os\n'), ((8124, 8169), 'os.path.join', 'join', (['args.pjs_root', 'name', 'f"""{name}_song.wav"""'], {}), "(args.pjs_root, name, f'{name}_song.wav')\n", (8128, 8169), False, 'from os.path import basename, join, splitext\n'), 
((8244, 8276), 'librosa.load', 'librosa.load', (['wav_path'], {'sr': '(48000)'}), '(wav_path, sr=48000)\n', (8256, 8276), False, 'import librosa\n'), ((8355, 8379), 'nnmnkwii.io.hts.load', 'hts.load', (['lab_align_path'], {}), '(lab_align_path)\n', (8363, 8379), False, 'from nnmnkwii.io import hts\n'), ((8396, 8420), 'nnmnkwii.io.hts.load', 'hts.load', (['lab_score_path'], {}), '(lab_score_path)\n', (8404, 8420), False, 'from nnmnkwii.io import hts\n'), ((8459, 8491), 'os.path.join', 'join', (['wav_dst_dir', "(name + '.wav')"], {}), "(wav_dst_dir, name + '.wav')\n", (8463, 8491), False, 'from os.path import basename, join, splitext\n'), ((8534, 8565), 'soundfile.write', 'sf.write', (['wav_dst_path', 'wav', 'sr'], {}), '(wav_dst_path, wav, sr)\n', (8542, 8565), True, 'import soundfile as sf\n'), ((8609, 8647), 'os.path.join', 'join', (['lab_align_dst_dir', "(name + '.lab')"], {}), "(lab_align_dst_dir, name + '.lab')\n", (8613, 8647), False, 'from os.path import basename, join, splitext\n'), ((8753, 8791), 'os.path.join', 'join', (['lab_score_dst_dir', "(name + '.lab')"], {}), "(lab_score_dst_dir, name + '.lab')\n", (8757, 8791), False, 'from os.path import basename, join, splitext\n'), ((1687, 1718), 'os.path.join', 'join', (['args.pjs_root', '"""**/*.lab"""'], {}), "(args.pjs_root, '**/*.lab')\n", (1691, 1718), False, 'from os.path import basename, join, splitext\n'), ((1750, 1786), 'os.path.join', 'join', (['args.pjs_root', '"""**/*.musicxml"""'], {}), "(args.pjs_root, '**/*.musicxml')\n", (1754, 1786), False, 'from os.path import basename, join, splitext\n'), ((3222, 3240), 'nnmnkwii.io.hts.HTSLabelFile', 'hts.HTSLabelFile', ([], {}), '()\n', (3238, 3240), False, 'from nnmnkwii.io import hts\n'), ((3266, 3284), 'nnmnkwii.io.hts.HTSLabelFile', 'hts.HTSLabelFile', ([], {}), '()\n', (3282, 3284), False, 'from nnmnkwii.io import hts\n'), ((4406, 4435), 'os.path.join', 'join', (['full_align_dir', '"""*.lab"""'], {}), "(full_align_dir, '*.lab')\n", (4410, 4435), False, 
'from os.path import basename, join, splitext\n'), ((4473, 4502), 'os.path.join', 'join', (['full_score_dir', '"""*.lab"""'], {}), "(full_score_dir, '*.lab')\n", (4477, 4502), False, 'from os.path import basename, join, splitext\n'), ((5287, 5316), 'numpy.abs', 'np.abs', (['(global_offset * 1e-07)'], {}), '(global_offset * 1e-07)\n', (5293, 5316), True, 'import numpy as np\n'), ((7171, 7200), 'os.path.join', 'join', (['full_align_dir', '"""*.lab"""'], {}), "(full_align_dir, '*.lab')\n", (7175, 7200), False, 'from os.path import basename, join, splitext\n'), ((7874, 7903), 'os.path.join', 'join', (['full_align_dir', '"""*.lab"""'], {}), "(full_align_dir, '*.lab')\n", (7878, 7903), False, 'from os.path import basename, join, splitext\n'), ((7941, 7970), 'os.path.join', 'join', (['full_score_dir', '"""*.lab"""'], {}), "(full_score_dir, '*.lab')\n", (7945, 7970), False, 'from os.path import basename, join, splitext\n'), ((5885, 5898), 'numpy.abs', 'np.abs', (['(a - b)'], {}), '(a - b)\n', (5891, 5898), True, 'import numpy as np\n'), ((8080, 8104), 'os.path.basename', 'basename', (['lab_align_path'], {}), '(lab_align_path)\n', (8088, 8104), False, 'from os.path import basename, join, splitext\n'), ((2373, 2408), 'numpy.asarray', 'np.asarray', (['align_mono_lab.contexts'], {}), '(align_mono_lab.contexts)\n', (2383, 2408), True, 'import numpy as np\n'), ((2412, 2447), 'numpy.asarray', 'np.asarray', (['sinsy_mono_lab.contexts'], {}), '(sinsy_mono_lab.contexts)\n', (2422, 2447), True, 'import numpy as np\n'), ((3852, 3878), 'os.path.join', 'join', (['full_score_dir', 'name'], {}), '(full_score_dir, name)\n', (3856, 3878), False, 'from os.path import basename, join, splitext\n'), ((3952, 3978), 'os.path.join', 'join', (['full_align_dir', 'name'], {}), '(full_align_dir, name)\n', (3956, 3978), False, 'from os.path import basename, join, splitext\n'), ((5488, 5521), 'numpy.asarray', 'np.asarray', (['lab_score.start_times'], {}), '(lab_score.start_times)\n', (5498, 5521), True, 
'import numpy as np\n'), ((5574, 5605), 'numpy.asarray', 'np.asarray', (['lab_score.end_times'], {}), '(lab_score.end_times)\n', (5584, 5605), True, 'import numpy as np\n'), ((3615, 3631), 'ipdb.set_trace', 'ipdb.set_trace', ([], {}), '()\n', (3629, 3631), False, 'import ipdb\n')] |
'''
More factorization code, courtesy of <NAME>.
'''
import numpy as np
import dataclasses
def moments(muhat_row, Sighat_row, muhat_col, Sighat_col, **kwargs):
    """Posterior second moments of the factors and the elementwise
    mean/variance of their product.

    Returns (row_m2, col_m2, mn, vr): per-row and per-column second
    moments E[u u^T], the mean matrix E[U V^T], and its elementwise
    variance.
    """
    outer_rows = np.einsum('ij,ik->ijk', muhat_row, muhat_row)
    outer_cols = np.einsum('ij,ik->ijk', muhat_col, muhat_col)
    row_m2 = Sighat_row + outer_rows
    col_m2 = Sighat_col + outer_cols
    mn = muhat_row @ muhat_col.T
    e2 = np.einsum('ajk,bjk -> ab', row_m2, col_m2)
    vr = e2 - mn ** 2
    return row_m2, col_m2, mn, vr
def prior_KL(muhat, Sighat, mu, Sig):
    """KL divergence between the per-row Gaussian posteriors
    N(muhat[i], Sighat[i]) and the shared prior N(mu, Sig), summed over rows."""
    deviation = muhat - mu[None, :]
    second_moment = Sighat + np.einsum('ij,ik->ijk', deviation, deviation)
    prior_precision = np.linalg.inv(Sig)
    # tr(Sig^{-1} E[(u - mu)(u - mu)^T]) summed over rows.
    trace_term = np.sum(prior_precision[None, :, :] * second_moment)
    n_entries = np.prod(muhat.shape)
    logdet_prior = muhat.shape[0] * np.linalg.slogdet(Sig)[1]
    logdet_post = np.sum(np.linalg.slogdet(Sighat)[1])
    return 0.5 * (trace_term - n_entries + logdet_prior - logdet_post)
r'''
_ _ _ _
__| (_)___ _ __ __ _| |_ ___| |__
/ _` | / __| '_ \ / _` | __/ __| '_ \
| (_| | \__ \ |_) | (_| | || (__| | | |
\__,_|_|___/ .__/ \__,_|\__\___|_| |_|
|_|
'''
def ELBO_dataterm(X, mn, vr, kind, theta=None):
    """Dispatch the ELBO data term to the likelihood-specific loss."""
    dispatch = {
        'normal': lambda: loss_normal(X, mn, vr, theta),
        'bernoulli': lambda: loss_bernoulli(X, mn, vr),
    }
    if kind not in dispatch:
        raise Exception("NYI")
    return dispatch[kind]()
def accumulate_omega_for_rows(X, muhat_row, Sighat_row, muhat_col, Sighat_col, kind, theta=None):
    """Accumulate the natural-parameter statistics (omega_1, omega_2)
    needed to update the row factors."""
    _, col_m2, mn, vr = moments(muhat_row, Sighat_row, muhat_col, Sighat_col)
    xi1, xi2 = get_xi(X, mn, vr, kind, theta)  # each Nrow x Ncol
    omega_2 = np.einsum('rc,cij -> rij', xi2, col_m2)
    omega_1 = np.einsum('rc,ci -> ri', xi1, muhat_col)
    return omega_1, omega_2
def accumulate_omega_for_cols(X, muhat_row, Sighat_row, muhat_col, Sighat_col, kind, theta=None):
    """Accumulate the natural-parameter statistics (omega_1, omega_2)
    needed to update the column factors (mirror of the row version)."""
    row_m2, _, mn, vr = moments(muhat_row, Sighat_row, muhat_col, Sighat_col)
    xi1, xi2 = get_xi(X, mn, vr, kind, theta)  # each Nrow x Ncol
    omega_2 = np.einsum('rc,rij -> cij', xi2, row_m2)
    omega_1 = np.einsum('rc,ri -> ci', xi1, muhat_row)
    return omega_1, omega_2
def get_xi(X, mn, vr, kind, theta=None):
    """Return the (xi1, xi2) pseudo-observation weights for *kind*."""
    if kind == 'normal':
        return get_xi_normal(X, mn, vr, theta)
    if kind == 'bernoulli':
        return get_xi_bernoulli(X, mn, vr)
    raise Exception("NYI")
def get_new_theta(X, mn, vr, kind, theta=None):
    """Re-estimate the observation-noise scale (normal likelihood only)."""
    if kind == 'bernoulli':
        # The Bernoulli model has no scale parameter.
        return None
    if kind == 'normal':
        zeta = get_zeta_normal(X, mn, vr)
        return np.sqrt(np.mean(zeta, axis=0))
    raise Exception("NYI")
r'''
_ _ _
__| | __ _| |_ __ _| |_ ___ _ __ _ __ ___ ___
/ _` |/ _` | __/ _` | __/ _ \ '__| '_ ` _ \/ __|
| (_| | (_| | || (_| | || __/ | | | | | | \__ \
\__,_|\__,_|\__\__,_|\__\___|_| |_| |_| |_|___/
'''
def loss_normal(X, curmu, curvar, theta):
    """Expected Gaussian log-likelihood of X under a latent N(curmu, curvar)
    with noise scale theta (elementwise)."""
    expected_sq_err = (X - curmu) ** 2 + curvar
    noise_var = theta ** 2
    return -.5 * expected_sq_err / (2 * noise_var) - .5 * np.log(np.pi * 2 * noise_var)
def loss_bernoulli(X, curmu, curvar):
    """Jaakkola-Jordan-style lower bound on the expected Bernoulli
    log-likelihood.

    The bound depends on the posterior second moment E[z^2] = curmu**2 +
    curvar.  The previous code squared the variance (curvar**2), which is
    inconsistent with get_xi_bernoulli (gamsq = curmu**2 + curvar) and
    dimensionally wrong; curvar enters linearly.
    """
    gam = np.sqrt(curmu ** 2 + curvar)
    return (X - .5) * curmu - log2cosho2_safe(gam)
def get_zeta_normal(X, curmu, curvar):
    """Expected squared residual E[(X - Z)^2] under the posterior."""
    residual = X - curmu
    return residual ** 2 + curvar
def solve_zeta_normal(zeta):
    """Average the expected squared residuals over rows (axis 0)."""
    column_means = np.mean(zeta, axis=0)
    return column_means
def get_xi_normal(X, curmu, curvar, theta):
    """Pseudo-observation weights for the Gaussian likelihood.

    Input
     - X       Nrow x Ncol
     - curmu   Nrow x Ncol (unused; kept for interface parity)
     - curvar  Nrow x Ncol (unused; kept for interface parity)
     - theta   Nrow x Ncol (or broadcastable to)
    Output
     - xi1     Nrow x Ncol  (precision-weighted data)
     - xi2     Nrow x Ncol  (row-constant precision)
    """
    n_rows = X.shape[0]
    xi1 = X / theta ** 2
    xi2 = np.outer(np.ones(n_rows), 1 / theta ** 2)
    return xi1, xi2
def get_xi_bernoulli(X, curmu, curvar):
    """Pseudo-observation weights for the Bernoulli likelihood.

    Input
     - X       Nrow x Ncol
     - curmu   Nrow x Ncol
     - curvar  Nrow x Ncol
    Output
     - xi1     centered observations
     - xi2     variational precision weights at gam = sqrt(E[z^2])
    """
    gam = np.sqrt(curmu ** 2 + curvar)
    xi1 = X - .5
    xi2 = pge_safe(gam)
    return xi1, xi2
def pge_safe(x):
    """Compute tanh(x/2)/(2x) with a singularity-free evaluation at x == 0.

    Near zero the exact expression is 0/0, so a second-order Taylor
    expansion 1/4 - x**2/48 is used instead.  Unlike the previous version,
    the division is guarded (safe_x) so no RuntimeWarning/nan is produced
    before np.where selects the branch.
    """
    x = np.asarray(x, dtype=float)
    near_zero = np.abs(x) < .00001
    taylor = .25 - 0.020833333333333332 * (x ** 2)  # 1/4 - x^2/48
    safe_x = np.where(near_zero, 1.0, x)  # dummy divisor where masked out
    exact = np.tanh(safe_x / 2) / (2 * safe_x)
    return np.where(near_zero, taylor, exact)
def log2cosho2_safe(x):
    """Return log(2*cosh(x/2)) without overflow for large |x|.

    Uses the identity log(2*cosh(y)) = |y| + log1p(exp(-2|y|)); the direct
    np.log(2*np.cosh(x/2)) overflows once |x|/2 exceeds ~710, despite the
    '_safe' name.  Values agree with the naive formula wherever it is finite.
    """
    y = np.abs(x) / 2
    return y + np.log1p(np.exp(-2 * y))
"numpy.prod",
"numpy.mean",
"numpy.abs",
"numpy.sqrt",
"numpy.ones",
"numpy.where",
"numpy.log",
"numpy.tanh",
"numpy.linalg.slogdet",
"numpy.linalg.inv",
"numpy.einsum",
"numpy.cosh"
] | [((339, 381), 'numpy.einsum', 'np.einsum', (['"""ajk,bjk -> ab"""', 'row_m2', 'col_m2'], {}), "('ajk,bjk -> ab', row_m2, col_m2)\n", (348, 381), True, 'import numpy as np\n'), ((609, 629), 'numpy.prod', 'np.prod', (['muhat.shape'], {}), '(muhat.shape)\n', (616, 629), True, 'import numpy as np\n'), ((1518, 1557), 'numpy.einsum', 'np.einsum', (['"""rc,cij -> rij"""', 'xi2', 'col_m2'], {}), "('rc,cij -> rij', xi2, col_m2)\n", (1527, 1557), True, 'import numpy as np\n'), ((1570, 1610), 'numpy.einsum', 'np.einsum', (['"""rc,ci -> ri"""', 'xi1', 'muhat_col'], {}), "('rc,ci -> ri', xi1, muhat_col)\n", (1579, 1610), True, 'import numpy as np\n'), ((1877, 1916), 'numpy.einsum', 'np.einsum', (['"""rc,rij -> cij"""', 'xi2', 'row_m2'], {}), "('rc,rij -> cij', xi2, row_m2)\n", (1886, 1916), True, 'import numpy as np\n'), ((1929, 1969), 'numpy.einsum', 'np.einsum', (['"""rc,ri -> ci"""', 'xi1', 'muhat_row'], {}), "('rc,ri -> ci', xi1, muhat_row)\n", (1938, 1969), True, 'import numpy as np\n'), ((3117, 3138), 'numpy.mean', 'np.mean', (['zeta'], {'axis': '(0)'}), '(zeta, axis=0)\n', (3124, 3138), True, 'import numpy as np\n'), ((3668, 3682), 'numpy.sqrt', 'np.sqrt', (['gamsq'], {}), '(gamsq)\n', (3675, 3682), True, 'import numpy as np\n'), ((3866, 3888), 'numpy.where', 'np.where', (['switch', 'A', 'B'], {}), '(switch, A, B)\n', (3874, 3888), True, 'import numpy as np\n'), ((183, 228), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'muhat_row', 'muhat_row'], {}), "('ij,ik->ijk', muhat_row, muhat_row)\n", (192, 228), True, 'import numpy as np\n'), ((253, 298), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'muhat_col', 'muhat_col'], {}), "('ij,ik->ijk', muhat_col, muhat_col)\n", (262, 298), True, 'import numpy as np\n'), ((512, 543), 'numpy.einsum', 'np.einsum', (['"""ij,ik->ijk"""', 'df', 'df'], {}), "('ij,ik->ijk', df, df)\n", (521, 543), True, 'import numpy as np\n'), ((3774, 3783), 'numpy.abs', 'np.abs', (['x'], {}), '(x)\n', (3780, 3783), True, 'import numpy as 
np\n'), ((3836, 3850), 'numpy.tanh', 'np.tanh', (['(x / 2)'], {}), '(x / 2)\n', (3843, 3850), True, 'import numpy as np\n'), ((665, 687), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['Sig'], {}), '(Sig)\n', (682, 687), True, 'import numpy as np\n'), ((716, 741), 'numpy.linalg.slogdet', 'np.linalg.slogdet', (['Sighat'], {}), '(Sighat)\n', (733, 741), True, 'import numpy as np\n'), ((2341, 2362), 'numpy.mean', 'np.mean', (['zeta'], {'axis': '(0)'}), '(zeta, axis=0)\n', (2348, 2362), True, 'import numpy as np\n'), ((2872, 2902), 'numpy.log', 'np.log', (['(np.pi * 2 * theta ** 2)'], {}), '(np.pi * 2 * theta ** 2)\n', (2878, 2902), True, 'import numpy as np\n'), ((2976, 3009), 'numpy.sqrt', 'np.sqrt', (['(curmu ** 2 + curvar ** 2)'], {}), '(curmu ** 2 + curvar ** 2)\n', (2983, 3009), True, 'import numpy as np\n'), ((3423, 3442), 'numpy.ones', 'np.ones', (['X.shape[0]'], {}), '(X.shape[0])\n', (3430, 3442), True, 'import numpy as np\n'), ((3979, 3993), 'numpy.cosh', 'np.cosh', (['(x / 2)'], {}), '(x / 2)\n', (3986, 3993), True, 'import numpy as np\n'), ((564, 582), 'numpy.linalg.inv', 'np.linalg.inv', (['Sig'], {}), '(Sig)\n', (577, 582), True, 'import numpy as np\n')] |
"""
Example for BatchIntrinsicPlasticity
"""
import os
import numpy as np
from pyrcn.base.blocks import BatchIntrinsicPlasticity
import matplotlib.pyplot as plt
import seaborn as sns
sns.set_theme()
# TU Dresden corporate-design palette. The source values are 8-bit RGB
# triples; matplotlib wants floats in [0, 1], so normalise once below.
_TUD_RGB = {
    'darkblue': (0, 48, 94),
    'gray': (114, 120, 121),
    'lightblue': (0, 106, 179),
    'darkgreen': (0, 125, 64),
    'lightgreen': (106, 176, 35),
    'darkpurple': (84, 55, 138),
    'lightpurple': (147, 16, 126),
    'orange': (238, 127, 0),
    'red': (181, 28, 28),
}
tud_colors = {name: tuple(channel / 255. for channel in rgb)
              for name, rgb in _TUD_RGB.items()}
# Output directory for artefacts produced by this example.
directory = os.path.join(os.getcwd(), 'bip')
def main():
    """Demonstrate BatchIntrinsicPlasticity on three input distributions.

    Fits one BIP transformer per target output distribution (uniform,
    exponential, normal) on samples drawn from uniform, exponential and
    normal inputs, then shows a 3x4 grid: rows are input distributions,
    the first three columns are the BIP-transformed histograms and the
    last column is the raw input histogram for reference.
    """
    if not os.path.exists(directory):
        os.makedirs(directory)

    rs = np.random.RandomState(42)
    algorithm = 'dresden'
    sample_size = (1000, 1)

    # One BIP transformer per target output distribution; all share the
    # same RandomState and fitting algorithm.
    i2n_uniform = BatchIntrinsicPlasticity(
        hidden_layer_size=1, input_activation='tanh', random_state=rs,
        distribution='uniform', algorithm=algorithm)
    i2n_exponential = BatchIntrinsicPlasticity(
        hidden_layer_size=1, input_activation='tanh', random_state=rs,
        distribution='exponential', algorithm=algorithm)
    i2n_normal = BatchIntrinsicPlasticity(
        hidden_layer_size=1, input_activation='tanh', random_state=rs,
        distribution='normal', algorithm=algorithm)

    # Input samples drawn from three different distributions.
    X_uniform = rs.uniform(size=sample_size)
    X_exponential = rs.exponential(size=sample_size)
    X_normal = rs.normal(size=sample_size)

    def exponential(x, lam):
        # Exponential pdf; kept for the commented-out analytic inputs below.
        return lam * np.exp(-lam * x)

    def gaussian(x, mu, sig):
        # Gaussian pdf; kept for the commented-out analytic inputs below.
        return np.exp(-np.power(x - mu, 2.) / (2 * np.power(sig, 2.))) \
            / np.sqrt(2. * np.pi) / sig

    # X_uniform = np.linspace(start=-1., stop=1., num=1000).reshape(-1, 1)
    # X_exponential = exponential(X_uniform + 1., 1)
    # X_normal = gaussian(X_uniform, 0, 1)

    # Display distributions: rows = input data (with per-row y-limit),
    # columns = BIP target distribution (with per-column colour).
    fig, axs = plt.subplots(3, 4)
    bins = 20
    inputs = [(X_exponential, 'exponential', 3.),
              (X_normal, 'normal', 1.5),
              (X_uniform, 'uniform', 2.5)]
    transformers = [(i2n_exponential, tud_colors['lightblue']),
                    (i2n_normal, tud_colors['lightgreen']),
                    (i2n_uniform, tud_colors['lightpurple'])]

    # Row-major order matches the original cell-by-cell sequence, so the
    # transformers see the data in the same order as before.
    for row, (X, _, y_max) in enumerate(inputs):
        for col, (i2n, color) in enumerate(transformers):
            sns.histplot(data=i2n.fit_transform(X), bins=bins, stat="density",
                         color=color, ax=axs[row, col], legend=False)
            axs[row, col].set_xlim((-1., 1.))
            axs[row, col].set_ylim((0., y_max))

    # Last column: the raw input distributions for reference.
    for row, (X, title, _) in enumerate(inputs):
        sns.histplot(data=X, bins=bins, color=tud_colors['gray'],
                     ax=axs[row, 3], legend=False)
        axs[row, 3].set_title(title)

    plt.tight_layout()
    plt.show()


if __name__ == "__main__":
    main()
| [
"os.path.exists",
"numpy.sqrt",
"os.makedirs",
"numpy.power",
"seaborn.set_theme",
"seaborn.histplot",
"os.getcwd",
"numpy.exp",
"matplotlib.pyplot.tight_layout",
"pyrcn.base.blocks.BatchIntrinsicPlasticity",
"matplotlib.pyplot.subplots",
"numpy.random.RandomState",
"matplotlib.pyplot.show"
... | [((184, 199), 'seaborn.set_theme', 'sns.set_theme', ([], {}), '()\n', (197, 199), True, 'import seaborn as sns\n'), ((712, 723), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (721, 723), False, 'import os\n'), ((825, 850), 'numpy.random.RandomState', 'np.random.RandomState', (['(42)'], {}), '(42)\n', (846, 850), True, 'import numpy as np\n'), ((925, 1061), 'pyrcn.base.blocks.BatchIntrinsicPlasticity', 'BatchIntrinsicPlasticity', ([], {'hidden_layer_size': '(1)', 'input_activation': '"""tanh"""', 'random_state': 'rs', 'distribution': '"""uniform"""', 'algorithm': 'algorithm'}), "(hidden_layer_size=1, input_activation='tanh',\n random_state=rs, distribution='uniform', algorithm=algorithm)\n", (949, 1061), False, 'from pyrcn.base.blocks import BatchIntrinsicPlasticity\n'), ((1166, 1306), 'pyrcn.base.blocks.BatchIntrinsicPlasticity', 'BatchIntrinsicPlasticity', ([], {'hidden_layer_size': '(1)', 'input_activation': '"""tanh"""', 'random_state': 'rs', 'distribution': '"""exponential"""', 'algorithm': 'algorithm'}), "(hidden_layer_size=1, input_activation='tanh',\n random_state=rs, distribution='exponential', algorithm=algorithm)\n", (1190, 1306), False, 'from pyrcn.base.blocks import BatchIntrinsicPlasticity\n'), ((1461, 1596), 'pyrcn.base.blocks.BatchIntrinsicPlasticity', 'BatchIntrinsicPlasticity', ([], {'hidden_layer_size': '(1)', 'input_activation': '"""tanh"""', 'random_state': 'rs', 'distribution': '"""normal"""', 'algorithm': 'algorithm'}), "(hidden_layer_size=1, input_activation='tanh',\n random_state=rs, distribution='normal', algorithm=algorithm)\n", (1485, 1596), False, 'from pyrcn.base.blocks import BatchIntrinsicPlasticity\n'), ((2801, 2819), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(3)', '(4)'], {}), '(3, 4)\n', (2813, 2819), True, 'import matplotlib.pyplot as plt\n'), ((5119, 5221), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'X_exponential', 'bins': 'bins', 'color': "tud_colors['gray']", 'ax': 'axs[0, 3]', 'legend': '(False)'}), 
"(data=X_exponential, bins=bins, color=tud_colors['gray'], ax=\n axs[0, 3], legend=False)\n", (5131, 5221), True, 'import seaborn as sns\n'), ((5277, 5374), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'X_normal', 'bins': 'bins', 'color': "tud_colors['gray']", 'ax': 'axs[1, 3]', 'legend': '(False)'}), "(data=X_normal, bins=bins, color=tud_colors['gray'], ax=axs[1, \n 3], legend=False)\n", (5289, 5374), True, 'import seaborn as sns\n'), ((5425, 5522), 'seaborn.histplot', 'sns.histplot', ([], {'data': 'X_uniform', 'bins': 'bins', 'color': "tud_colors['gray']", 'ax': 'axs[2, 3]', 'legend': '(False)'}), "(data=X_uniform, bins=bins, color=tud_colors['gray'], ax=axs[2,\n 3], legend=False)\n", (5437, 5522), True, 'import seaborn as sns\n'), ((5576, 5594), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (5592, 5594), True, 'import matplotlib.pyplot as plt\n'), ((5599, 5609), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5607, 5609), True, 'import matplotlib.pyplot as plt\n'), ((757, 782), 'os.path.exists', 'os.path.exists', (['directory'], {}), '(directory)\n', (771, 782), False, 'import os\n'), ((792, 814), 'os.makedirs', 'os.makedirs', (['directory'], {}), '(directory)\n', (803, 814), False, 'import os\n'), ((1870, 1886), 'numpy.exp', 'np.exp', (['(-lam * x)'], {}), '(-lam * x)\n', (1876, 1886), True, 'import numpy as np\n'), ((2005, 2025), 'numpy.sqrt', 'np.sqrt', (['(2.0 * np.pi)'], {}), '(2.0 * np.pi)\n', (2012, 2025), True, 'import numpy as np\n'), ((1941, 1962), 'numpy.power', 'np.power', (['(x - mu)', '(2.0)'], {}), '(x - mu, 2.0)\n', (1949, 1962), True, 'import numpy as np\n'), ((1969, 1987), 'numpy.power', 'np.power', (['sig', '(2.0)'], {}), '(sig, 2.0)\n', (1977, 1987), True, 'import numpy as np\n')] |
# ---
# jupyter:
# jupytext:
# formats: ipynb,.pct.py:percent
# text_representation:
# extension: .py
# format_name: percent
# format_version: '1.3'
# jupytext_version: 1.3.3
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# %% [markdown]
# # Natural gradients
#
# This notebook shows some basic usage of the natural gradient optimizer, both on its own and in combination with Adam optimizer.
# %%
import warnings
import numpy as np
import gpflow
import tensorflow as tf
from gpflow.ci_utils import ci_niter, ci_range
from gpflow.models import VGP, GPR, SGPR, SVGP
from gpflow.optimizers import NaturalGradient
from gpflow.optimizers.natgrad import XiSqrtMeanVar
from gpflow import set_trainable
# %matplotlib inline
# %precision 4
# Seed NumPy and TensorFlow so every run of the notebook is reproducible.
np.random.seed(0)
tf.random.set_seed(0)
N, D = 100, 2  # N training points in D input dimensions
batch_size = 50  # minibatch size used by the stochastic examples below
# inducing points
M = 10
x = np.random.uniform(size=(N, D))
y = np.sin(10 * x[:, :1]) + 5 * x[:, 1:] ** 2  # deterministic nonlinear target
data = (x, y)
inducing_variable = tf.random.uniform((M, D))
adam_learning_rate = 0.01
iterations = ci_niter(5)  # ci_niter shrinks the count on CI runs
# %% [markdown]
# ### VGP is a GPR
# %% [markdown]
# The following section demonstrates how natural gradients can turn VGP into GPR *in a single step, if the likelihood is Gaussian*.
# %% [markdown]
# Let's start by first creating a standard GPR model with Gaussian likelihood:
# %%
gpr = GPR(data, kernel=gpflow.kernels.Matern52())  # exact GP regression baseline
# %% [markdown]
# The likelihood of the exact GP model is:
# %%
gpr.log_likelihood().numpy()
# %% [markdown]
# Now we will create an approximate model which approximates the true posterior via a variational Gaussian distribution.<br>We initialize the distribution to be zero mean and unit variance.
# %%
vgp = VGP(data, kernel=gpflow.kernels.Matern52(), likelihood=gpflow.likelihoods.Gaussian())
# %% [markdown]
# The likelihood of the approximate GP model is:
# %%
vgp.log_likelihood().numpy()
# %% [markdown]
# Obviously, our initial guess for the variational distribution is not correct, which results in a lower bound to the likelihood of the exact GPR model. We can optimize the variational parameters in order to get a tighter bound.
# %% [markdown]
# In fact, we only need to take **one step** in the natural gradient direction to recover the exact posterior:
# %%
# gamma=1.0 takes a full-length natural-gradient step; with a Gaussian
# likelihood a single step is enough to match GPR (see markdown above).
natgrad_opt = NaturalGradient(gamma=1.0)
variational_params = [(vgp.q_mu, vgp.q_sqrt)]
natgrad_opt.minimize(lambda: -vgp.log_marginal_likelihood(), var_list=variational_params)
# %% [markdown]
# The likelihood of the approximate GP model after a single NatGrad step:
# %%
vgp.log_likelihood().numpy()
# %% [markdown]
# ### Optimize both variational parameters and kernel hyperparameters together
#
# In the Gaussian likelihood case we can iterate between an Adam update for the hyperparameters and a NatGrad update for the variational parameters. That way, we achieve optimization of hyperparameters as if the model were a GPR.
# %% [markdown]
# The trick is to forbid Adam from updating the variational parameters by setting them to not trainable.
# %%
# Stop Adam from optimizing the variational parameters
set_trainable(vgp.q_mu, False)
set_trainable(vgp.q_sqrt, False)
adam_opt_for_vgp = tf.optimizers.Adam(adam_learning_rate)
adam_opt_for_gpr = tf.optimizers.Adam(adam_learning_rate)
# %%
# Reference: optimize the exact GPR hyperparameters with Adam only.
for i in range(iterations):
    adam_opt_for_gpr.minimize(
        lambda: -gpr.log_marginal_likelihood(), var_list=gpr.trainable_variables
    )
    likelihood = gpr.log_likelihood()
    tf.print(f"GPR with Adam: iteration {i + 1} likelihood {likelihood:.04f}")
# %%
# VGP: interleave an Adam step on the hyperparameters with a natural
# gradient step on (q_mu, q_sqrt) each iteration.
for i in range(iterations):
    adam_opt_for_vgp.minimize(
        lambda: -vgp.log_marginal_likelihood(), var_list=vgp.trainable_variables
    )
    natgrad_opt.minimize(lambda: -vgp.log_marginal_likelihood(), var_list=variational_params)
    likelihood = vgp.log_likelihood()
    tf.print(f"VGP with NaturalGradient and Adam: iteration {i + 1} likelihood {likelihood:.04f}")
# %% [markdown]
# Compare GPR and VGP lengthscales after optimization:
# %%
print(f"GPR lengthscales = {gpr.kernel.lengthscales.numpy():.04f}")
print(f"VGP lengthscales = {vgp.kernel.lengthscales.numpy():.04f}")
# %% [markdown]
# ### Natural gradients also work for the sparse model
# Similarly, natural gradients turn SVGP into SGPR in the Gaussian likelihood case. <br>
# We can again combine natural gradients with Adam to update both variational parameters and hyperparameters too.<br>
# Here we'll just do a single natural step demonstration.
# %%
svgp = SVGP(
    kernel=gpflow.kernels.Matern52(),
    likelihood=gpflow.likelihoods.Gaussian(),
    inducing_variable=inducing_variable,
)
sgpr = SGPR(data, kernel=gpflow.kernels.Matern52(), inducing_variable=inducing_variable)
# Same starting noise variance for both sparse models.
for model in svgp, sgpr:
    model.likelihood.variance.assign(0.1)
# %% [markdown]
# Analytically optimal sparse model likelihood:
# %%
sgpr.log_likelihood().numpy()
# %% [markdown]
# SVGP likelihood before natural gradient step:
# %%
svgp.log_likelihood(data).numpy()
# %%
variational_params = [(svgp.q_mu, svgp.q_sqrt)]
def svgp_loss_cb():
    """Closure returning the full-batch negative ELBO of the SVGP."""
    return -svgp.log_marginal_likelihood(data)
natgrad_opt = NaturalGradient(gamma=1.0)
natgrad_opt.minimize(svgp_loss_cb, var_list=variational_params)
# %% [markdown]
# SVGP likelihood after a single natural gradient step:
# %%
svgp.log_likelihood(data).numpy()
# %% [markdown]
# ### Minibatches
# A crucial property of the natural gradient method is that it still works with minibatches.
# In practice though, we need to use a smaller gamma.
# %%
natgrad_opt = NaturalGradient(gamma=0.1)
data_minibatch = (
    tf.data.Dataset.from_tensor_slices(data).prefetch(N).repeat().shuffle(N).batch(batch_size)
)
data_minibatch_it = iter(data_minibatch)
def svgp_stochastic_loss_cb() -> tf.Tensor:
    """Closure returning the negative ELBO on the next minibatch."""
    batch = next(data_minibatch_it)
    return -svgp.log_marginal_likelihood(batch)
for _ in range(ci_niter(100)):
    natgrad_opt.minimize(svgp_stochastic_loss_cb, var_list=variational_params)
# %% [markdown]
# Minibatch SVGP likelihood after NatGrad optimization:
# %%
np.average([svgp.log_likelihood(next(data_minibatch_it)) for _ in ci_range(100)])
# %% [markdown]
# ### Comparison with ordinary gradients in the conjugate case
#
# ##### (Take home message: natural gradients are always better)
#
# Compared to SVGP with ordinary gradients with minibatches, the natural gradient optimizer is much faster in the Gaussian case.
#
# Here we'll do hyperparameter learning together with optimization of the variational parameters, comparing the interleaved natural gradient approach and the one using ordinary gradients for the hyperparameters and variational parameters jointly.
#
# **NOTE:** Again we need to compromise for smaller gamma value, which we'll keep *fixed* during the optimization.
# %%
# Two identical SVGP models: one trained with plain Adam, one with
# Adam (hyperparameters) + natural gradients (variational parameters).
svgp_ordinary = SVGP(
    kernel=gpflow.kernels.Matern52(),
    likelihood=gpflow.likelihoods.Gaussian(),
    inducing_variable=inducing_variable,
)
svgp_natgrad = SVGP(
    kernel=gpflow.kernels.Matern52(),
    likelihood=gpflow.likelihoods.Gaussian(),
    inducing_variable=inducing_variable,
)
# ordinary gradients with Adam for SVGP
ordinary_adam_opt = tf.optimizers.Adam(adam_learning_rate)
# NatGrads and Adam for SVGP
# Stop Adam from optimizing the variational parameters
set_trainable(svgp_natgrad.q_mu, False)
set_trainable(svgp_natgrad.q_sqrt, False)
# Create the optimize_tensors for SVGP
natgrad_adam_opt = tf.optimizers.Adam(adam_learning_rate)
natgrad_opt = NaturalGradient(gamma=0.1)
variational_params = [(svgp_natgrad.q_mu, svgp_natgrad.q_sqrt)]
# %% [markdown]
# Let's optimize the models:
# %%
data_minibatch = (
    tf.data.Dataset.from_tensor_slices(data).prefetch(N).repeat().shuffle(N).batch(batch_size)
)
data_minibatch_it = iter(data_minibatch)
def svgp_ordinary_loss_cb() -> tf.Tensor:
    """Negative ELBO of the Adam-only model on the next minibatch."""
    batch = next(data_minibatch_it)
    return -svgp_ordinary.log_marginal_likelihood(batch)
def svgp_natgrad_loss_cb() -> tf.Tensor:
    """Negative ELBO of the NatGrad model on the next minibatch."""
    batch = next(data_minibatch_it)
    return -svgp_natgrad.log_marginal_likelihood(batch)
for _ in range(ci_niter(100)):
    ordinary_adam_opt.minimize(svgp_ordinary_loss_cb, var_list=svgp_ordinary.trainable_variables)
for _ in range(ci_niter(100)):
    natgrad_adam_opt.minimize(svgp_natgrad_loss_cb, var_list=svgp_natgrad.trainable_variables)
    natgrad_opt.minimize(svgp_natgrad_loss_cb, var_list=variational_params)
# %% [markdown]
# SVGP likelihood after ordinary `Adam` optimization:
# %%
np.average([svgp_ordinary.log_likelihood(next(data_minibatch_it)) for _ in ci_range(100)])
# %% [markdown]
# SVGP likelihood after `NaturalGradient` and `Adam` optimization:
# %%
np.average([svgp_natgrad.log_likelihood(next(data_minibatch_it)) for _ in ci_range(100)])
# %% [markdown]
# ### Comparison with ordinary gradients in the non-conjugate case
# #### Binary classification
#
# ##### (Take home message: natural gradients are usually better)
#
# We can use natural gradients even when the likelihood isn't Gaussian. It isn't guaranteed to be better, but it usually is better in practical situations.
# %%
# Binary labels in {-1, +1} for the non-conjugate (Bernoulli) example.
y_binary = np.random.choice([1.0, -1], size=x.shape)
vgp_data = (x, y_binary)
vgp_bernoulli = VGP(
    vgp_data, kernel=gpflow.kernels.Matern52(), likelihood=gpflow.likelihoods.Bernoulli()
)
vgp_bernoulli_natgrad = VGP(
    vgp_data, kernel=gpflow.kernels.Matern52(), likelihood=gpflow.likelihoods.Bernoulli()
)
# ordinary gradients with Adam for VGP with Bernoulli likelihood
adam_opt = tf.optimizers.Adam(adam_learning_rate)
# NatGrads and Adam for VGP with Bernoulli likelihood
# Stop Adam from optimizing the variational parameters
set_trainable(vgp_bernoulli_natgrad.q_mu, False)
set_trainable(vgp_bernoulli_natgrad.q_sqrt, False)
# Create the optimize_tensors for VGP with natural gradients
natgrad_adam_opt = tf.optimizers.Adam(adam_learning_rate)
natgrad_opt = NaturalGradient(gamma=0.1)
variational_params = [(vgp_bernoulli_natgrad.q_mu, vgp_bernoulli_natgrad.q_sqrt)]
# %%
# Optimize vgp_bernoulli
for _ in range(ci_niter(100)):
    adam_opt.minimize(
        lambda: -vgp_bernoulli.log_marginal_likelihood(), var_list=vgp_bernoulli.trainable_variables
    )
# Optimize vgp_bernoulli_natgrad
for _ in range(ci_niter(100)):
    adam_opt.minimize(
        lambda: -vgp_bernoulli_natgrad.log_marginal_likelihood(),
        var_list=vgp_bernoulli_natgrad.trainable_variables,
    )
    natgrad_opt.minimize(
        lambda: -vgp_bernoulli_natgrad.log_marginal_likelihood(), var_list=variational_params
    )
# %% [markdown]
# VGP likelihood after ordinary `Adam` optimization:
# %%
vgp_bernoulli.log_likelihood().numpy()
# %% [markdown]
# VGP likelihood after `NaturalGradient` + `Adam` optimization:
# %%
vgp_bernoulli_natgrad.log_likelihood().numpy()
# %% [markdown]
# We can also choose to run natural gradients in another parameterization.<br>
# The sensible choice is the model parameters (q_mu, q_sqrt), which is already in GPflow.
# %%
vgp_bernoulli_natgrads_xi = VGP(
    vgp_data, kernel=gpflow.kernels.Matern52(), likelihood=gpflow.likelihoods.Bernoulli()
)
# Stop Adam from optimizing the variational parameters
set_trainable(vgp_bernoulli_natgrads_xi.q_mu, False)
set_trainable(vgp_bernoulli_natgrads_xi.q_sqrt, False)
# Create the optimize_tensors for VGP with Bernoulli likelihood
adam_opt = tf.optimizers.Adam(adam_learning_rate)
natgrad_opt = NaturalGradient(gamma=0.01)
# Third tuple element selects the (sqrt-mean, variance) transform for the
# natural-gradient update instead of the default parameterization.
variational_params = [
    (vgp_bernoulli_natgrads_xi.q_mu, vgp_bernoulli_natgrads_xi.q_sqrt, XiSqrtMeanVar())
]
# %%
# Optimize vgp_bernoulli_natgrads_xi
for _ in range(ci_niter(100)):
    adam_opt.minimize(
        lambda: -vgp_bernoulli_natgrads_xi.log_marginal_likelihood(),
        var_list=vgp_bernoulli_natgrads_xi.trainable_variables,
    )
    natgrad_opt.minimize(
        lambda: -vgp_bernoulli_natgrads_xi.log_marginal_likelihood(), var_list=variational_params
    )
# %% [markdown]
# VGP likelihood after `NaturalGradient` with `XiSqrtMeanVar` + `Adam` optimization:
# %%
vgp_bernoulli_natgrads_xi.log_likelihood().numpy()
# %% [markdown]
# With sufficiently small steps, it shouldn't make a difference which transform is used, but for large
# steps this can make a difference in practice.
| [
"tensorflow.random.uniform",
"gpflow.optimizers.natgrad.XiSqrtMeanVar",
"gpflow.ci_utils.ci_niter",
"gpflow.kernels.Matern52",
"tensorflow.random.set_seed",
"tensorflow.data.Dataset.from_tensor_slices",
"numpy.random.choice",
"tensorflow.print",
"gpflow.likelihoods.Bernoulli",
"gpflow.optimizers.N... | [((817, 834), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (831, 834), True, 'import numpy as np\n'), ((835, 856), 'tensorflow.random.set_seed', 'tf.random.set_seed', (['(0)'], {}), '(0)\n', (853, 856), True, 'import tensorflow as tf\n'), ((919, 949), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': '(N, D)'}), '(size=(N, D))\n', (936, 949), True, 'import numpy as np\n'), ((1031, 1056), 'tensorflow.random.uniform', 'tf.random.uniform', (['(M, D)'], {}), '((M, D))\n', (1048, 1056), True, 'import tensorflow as tf\n'), ((1096, 1107), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(5)'], {}), '(5)\n', (1104, 1107), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((2340, 2366), 'gpflow.optimizers.NaturalGradient', 'NaturalGradient', ([], {'gamma': '(1.0)'}), '(gamma=1.0)\n', (2355, 2366), False, 'from gpflow.optimizers import NaturalGradient\n'), ((3140, 3170), 'gpflow.set_trainable', 'set_trainable', (['vgp.q_mu', '(False)'], {}), '(vgp.q_mu, False)\n', (3153, 3170), False, 'from gpflow import set_trainable\n'), ((3171, 3203), 'gpflow.set_trainable', 'set_trainable', (['vgp.q_sqrt', '(False)'], {}), '(vgp.q_sqrt, False)\n', (3184, 3203), False, 'from gpflow import set_trainable\n'), ((3224, 3262), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (3242, 3262), True, 'import tensorflow as tf\n'), ((3282, 3320), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (3300, 3320), True, 'import tensorflow as tf\n'), ((5172, 5198), 'gpflow.optimizers.NaturalGradient', 'NaturalGradient', ([], {'gamma': '(1.0)'}), '(gamma=1.0)\n', (5187, 5198), False, 'from gpflow.optimizers import NaturalGradient\n'), ((5578, 5604), 'gpflow.optimizers.NaturalGradient', 'NaturalGradient', ([], {'gamma': '(0.1)'}), '(gamma=0.1)\n', (5593, 5604), False, 'from gpflow.optimizers import NaturalGradient\n'), ((7174, 7212), 
'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (7192, 7212), True, 'import tensorflow as tf\n'), ((7298, 7337), 'gpflow.set_trainable', 'set_trainable', (['svgp_natgrad.q_mu', '(False)'], {}), '(svgp_natgrad.q_mu, False)\n', (7311, 7337), False, 'from gpflow import set_trainable\n'), ((7338, 7379), 'gpflow.set_trainable', 'set_trainable', (['svgp_natgrad.q_sqrt', '(False)'], {}), '(svgp_natgrad.q_sqrt, False)\n', (7351, 7379), False, 'from gpflow import set_trainable\n'), ((7439, 7477), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (7457, 7477), True, 'import tensorflow as tf\n'), ((7493, 7519), 'gpflow.optimizers.NaturalGradient', 'NaturalGradient', ([], {'gamma': '(0.1)'}), '(gamma=0.1)\n', (7508, 7519), False, 'from gpflow.optimizers import NaturalGradient\n'), ((9104, 9145), 'numpy.random.choice', 'np.random.choice', (['[1.0, -1]'], {'size': 'x.shape'}), '([1.0, -1], size=x.shape)\n', (9120, 9145), True, 'import numpy as np\n'), ((9483, 9521), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (9501, 9521), True, 'import tensorflow as tf\n'), ((9632, 9680), 'gpflow.set_trainable', 'set_trainable', (['vgp_bernoulli_natgrad.q_mu', '(False)'], {}), '(vgp_bernoulli_natgrad.q_mu, False)\n', (9645, 9680), False, 'from gpflow import set_trainable\n'), ((9681, 9731), 'gpflow.set_trainable', 'set_trainable', (['vgp_bernoulli_natgrad.q_sqrt', '(False)'], {}), '(vgp_bernoulli_natgrad.q_sqrt, False)\n', (9694, 9731), False, 'from gpflow import set_trainable\n'), ((9813, 9851), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (9831, 9851), True, 'import tensorflow as tf\n'), ((9866, 9892), 'gpflow.optimizers.NaturalGradient', 'NaturalGradient', ([], {'gamma': '(0.1)'}), '(gamma=0.1)\n', (9881, 9892), False, 'from gpflow.optimizers import 
NaturalGradient\n'), ((11135, 11187), 'gpflow.set_trainable', 'set_trainable', (['vgp_bernoulli_natgrads_xi.q_mu', '(False)'], {}), '(vgp_bernoulli_natgrads_xi.q_mu, False)\n', (11148, 11187), False, 'from gpflow import set_trainable\n'), ((11188, 11242), 'gpflow.set_trainable', 'set_trainable', (['vgp_bernoulli_natgrads_xi.q_sqrt', '(False)'], {}), '(vgp_bernoulli_natgrads_xi.q_sqrt, False)\n', (11201, 11242), False, 'from gpflow import set_trainable\n'), ((11319, 11357), 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['adam_learning_rate'], {}), '(adam_learning_rate)\n', (11337, 11357), True, 'import tensorflow as tf\n'), ((11372, 11399), 'gpflow.optimizers.NaturalGradient', 'NaturalGradient', ([], {'gamma': '(0.01)'}), '(gamma=0.01)\n', (11387, 11399), False, 'from gpflow.optimizers import NaturalGradient\n'), ((954, 975), 'numpy.sin', 'np.sin', (['(10 * x[:, :1])'], {}), '(10 * x[:, :1])\n', (960, 975), True, 'import numpy as np\n'), ((3515, 3589), 'tensorflow.print', 'tf.print', (['f"""GPR with Adam: iteration {i + 1} likelihood {likelihood:.04f}"""'], {}), "(f'GPR with Adam: iteration {i + 1} likelihood {likelihood:.04f}')\n", (3523, 3589), True, 'import tensorflow as tf\n'), ((3878, 3982), 'tensorflow.print', 'tf.print', (['f"""VGP with NaturalGradient and Adam: iteration {i + 1} likelihood {likelihood:.04f}"""'], {}), "(\n f'VGP with NaturalGradient and Adam: iteration {i + 1} likelihood {likelihood:.04f}'\n )\n", (3886, 3982), True, 'import tensorflow as tf\n'), ((5910, 5923), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(100)'], {}), '(100)\n', (5918, 5923), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((8082, 8095), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(100)'], {}), '(100)\n', (8090, 8095), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((8213, 8226), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(100)'], {}), '(100)\n', (8221, 8226), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((10021, 10034), 
'gpflow.ci_utils.ci_niter', 'ci_niter', (['(100)'], {}), '(100)\n', (10029, 10034), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((10216, 10229), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(100)'], {}), '(100)\n', (10224, 10229), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((11572, 11585), 'gpflow.ci_utils.ci_niter', 'ci_niter', (['(100)'], {}), '(100)\n', (11580, 11585), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((1418, 1443), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (1441, 1443), False, 'import gpflow\n'), ((1776, 1801), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (1799, 1801), False, 'import gpflow\n'), ((1814, 1843), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (1841, 1843), False, 'import gpflow\n'), ((4554, 4579), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (4577, 4579), False, 'import gpflow\n'), ((4596, 4625), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (4623, 4625), False, 'import gpflow\n'), ((4695, 4720), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (4718, 4720), False, 'import gpflow\n'), ((6849, 6874), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (6872, 6874), False, 'import gpflow\n'), ((6891, 6920), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (6918, 6920), False, 'import gpflow\n'), ((6997, 7022), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (7020, 7022), False, 'import gpflow\n'), ((7039, 7068), 'gpflow.likelihoods.Gaussian', 'gpflow.likelihoods.Gaussian', ([], {}), '()\n', (7066, 7068), False, 'import gpflow\n'), ((9214, 9239), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (9237, 9239), False, 'import gpflow\n'), ((9252, 9282), 'gpflow.likelihoods.Bernoulli', 'gpflow.likelihoods.Bernoulli', ([], {}), 
'()\n', (9280, 9282), False, 'import gpflow\n'), ((9335, 9360), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (9358, 9360), False, 'import gpflow\n'), ((9373, 9403), 'gpflow.likelihoods.Bernoulli', 'gpflow.likelihoods.Bernoulli', ([], {}), '()\n', (9401, 9403), False, 'import gpflow\n'), ((11008, 11033), 'gpflow.kernels.Matern52', 'gpflow.kernels.Matern52', ([], {}), '()\n', (11031, 11033), False, 'import gpflow\n'), ((11046, 11076), 'gpflow.likelihoods.Bernoulli', 'gpflow.likelihoods.Bernoulli', ([], {}), '()\n', (11074, 11076), False, 'import gpflow\n'), ((11495, 11510), 'gpflow.optimizers.natgrad.XiSqrtMeanVar', 'XiSqrtMeanVar', ([], {}), '()\n', (11508, 11510), False, 'from gpflow.optimizers.natgrad import XiSqrtMeanVar\n'), ((6150, 6163), 'gpflow.ci_utils.ci_range', 'ci_range', (['(100)'], {}), '(100)\n', (6158, 6163), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((8552, 8565), 'gpflow.ci_utils.ci_range', 'ci_range', (['(100)'], {}), '(100)\n', (8560, 8565), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((8732, 8745), 'gpflow.ci_utils.ci_range', 'ci_range', (['(100)'], {}), '(100)\n', (8740, 8745), False, 'from gpflow.ci_utils import ci_niter, ci_range\n'), ((5629, 5669), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data'], {}), '(data)\n', (5663, 5669), True, 'import tensorflow as tf\n'), ((7659, 7699), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['data'], {}), '(data)\n', (7693, 7699), True, 'import tensorflow as tf\n')] |
"""Average several GA / CGA training histories and plot the mean distance."""
import numpy as np
import matplotlib.pyplot as plt

def _mean_history(variant, run_ids):
    """Load the named runs, keep the first 300 generations of each, stack them
    along a third axis and average element-wise across the runs."""
    runs = [
        np.load("data/useful/train_historys_map7_{0}_{1}(9, 15,8, 3).npy".format(variant, run_id))[:300, :]
        for run_id in run_ids
    ]
    return np.mean(np.dstack(runs), axis=2)

train_historys_ga = _mean_history("GA", (3, 4, 5, 6, 9))
train_historys_cga = _mean_history("CGA", (3, 4, 5, 6, 2))

# Column 0 is plotted as the mean distance; column 1 (max distance) and the
# CGA series are extracted but only used by the disabled comparison plot.
generations = list(range(train_historys_ga.shape[0]))
meandistance = list(train_historys_ga[:, 0])
maxdistance = list(train_historys_ga[:, 1])
meandistance_CGA = list(train_historys_cga[:300, 0])
maxdistance_CGA = list(train_historys_cga[:300, 1])

plt.plot(generations, meandistance, label="GA")
plt.axis([-1, 300, -1, 5000])
plt.ylabel("Mean Distance", fontsize=16)
plt.xlabel("generation", fontsize=16)
plt.legend(loc=9, borderaxespad=0.)
plt.show()
"numpy.dstack",
"numpy.mean",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.axis",
"numpy.load",
"matplotlib.pyplot.legend",
"matplotlib.pyplot.show"
] | [((72, 135), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_GA_3(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_GA_3(9, 15,8, 3).npy')\n", (79, 135), True, 'import numpy as np\n'), ((154, 217), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_GA_4(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_GA_4(9, 15,8, 3).npy')\n", (161, 217), True, 'import numpy as np\n'), ((236, 299), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_GA_5(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_GA_5(9, 15,8, 3).npy')\n", (243, 299), True, 'import numpy as np\n'), ((318, 381), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_GA_6(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_GA_6(9, 15,8, 3).npy')\n", (325, 381), True, 'import numpy as np\n'), ((400, 463), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_GA_9(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_GA_9(9, 15,8, 3).npy')\n", (407, 463), True, 'import numpy as np\n'), ((483, 547), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_CGA_3(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_CGA_3(9, 15,8, 3).npy')\n", (490, 547), True, 'import numpy as np\n'), ((566, 630), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_CGA_4(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_CGA_4(9, 15,8, 3).npy')\n", (573, 630), True, 'import numpy as np\n'), ((649, 713), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_CGA_5(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_CGA_5(9, 15,8, 3).npy')\n", (656, 713), True, 'import numpy as np\n'), ((732, 796), 'numpy.load', 'np.load', (['"""data/useful/train_historys_map7_CGA_6(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_CGA_6(9, 15,8, 3).npy')\n", (739, 796), True, 'import numpy as np\n'), ((816, 880), 'numpy.load', 'np.load', 
(['"""data/useful/train_historys_map7_CGA_2(9, 15,8, 3).npy"""'], {}), "('data/useful/train_historys_map7_CGA_2(9, 15,8, 3).npy')\n", (823, 880), True, 'import numpy as np\n'), ((902, 1053), 'numpy.dstack', 'np.dstack', (['(train_historys6[:300, :], train_historys7[:300, :], train_historys8[:300,\n :], train_historys9[:300, :], train_historys10[:300, :])'], {}), '((train_historys6[:300, :], train_historys7[:300, :],\n train_historys8[:300, :], train_historys9[:300, :], train_historys10[:\n 300, :]))\n', (911, 1053), True, 'import numpy as np\n'), ((1055, 1090), 'numpy.mean', 'np.mean', (['train_historys_cga'], {'axis': '(2)'}), '(train_historys_cga, axis=2)\n', (1062, 1090), True, 'import numpy as np\n'), ((1109, 1259), 'numpy.dstack', 'np.dstack', (['(train_historys1[:300, :], train_historys2[:300, :], train_historys3[:300,\n :], train_historys4[:300, :], train_historys5[:300, :])'], {}), '((train_historys1[:300, :], train_historys2[:300, :],\n train_historys3[:300, :], train_historys4[:300, :], train_historys5[:\n 300, :]))\n', (1118, 1259), True, 'import numpy as np\n'), ((1260, 1294), 'numpy.mean', 'np.mean', (['train_historys_ga'], {'axis': '(2)'}), '(train_historys_ga, axis=2)\n', (1267, 1294), True, 'import numpy as np\n'), ((1904, 1956), 'matplotlib.pyplot.plot', 'plt.plot', (['meandistance_idx', 'meandistance'], {'label': '"""GA"""'}), "(meandistance_idx, meandistance, label='GA')\n", (1912, 1956), True, 'import matplotlib.pyplot as plt\n'), ((2016, 2045), 'matplotlib.pyplot.axis', 'plt.axis', (['[-1, 300, -1, 5000]'], {}), '([-1, 300, -1, 5000])\n', (2024, 2045), True, 'import matplotlib.pyplot as plt\n'), ((2046, 2086), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Mean Distance"""'], {'fontsize': '(16)'}), "('Mean Distance', fontsize=16)\n", (2056, 2086), True, 'import matplotlib.pyplot as plt\n'), ((2087, 2124), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""generation"""'], {'fontsize': '(16)'}), "('generation', fontsize=16)\n", (2097, 2124), True, 
'import matplotlib.pyplot as plt\n'), ((2125, 2161), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '(9)', 'borderaxespad': '(0.0)'}), '(loc=9, borderaxespad=0.0)\n', (2135, 2161), True, 'import matplotlib.pyplot as plt\n'), ((2445, 2455), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2453, 2455), True, 'import matplotlib.pyplot as plt\n')] |
import os
import cv2
import dlib
import json
import yaml
import numpy as np
from time import monotonic as now
from datetime import timedelta
from age_gender.preprocess.face_aligner import FaceAligner
from concurrent.futures import ProcessPoolExecutor, as_completed
def get_area(rect):
    """Return the pixel area (height * width) of a dlib-style rectangle.

    ``rect`` only needs to expose the ``left()``, ``top()``, ``right()`` and
    ``bottom()`` accessor methods.
    """
    width = rect.right() - rect.left()
    height = rect.bottom() - rect.top()
    return height * width
class Converter:
    """Detects, aligns and saves faces for one slice of the dataset.

    Each record of the JSON dataset is validated (numeric gender, integer age
    in (0, 100]); the single face in the record's image is then detected,
    aligned and written under ``processed_dataset_path``.
    """

    def __init__(self, config):
        self.config = config
        self.dataset_path = config['general']['dataset_path']
        # dlib's pre-trained 68-point landmark model; expected in the working directory.
        self.shape_predictor = 'shape_predictor_68_face_landmarks.dat'
        self.detector = dlib.get_frontal_face_detector()
        self.predictor = dlib.shape_predictor(self.shape_predictor)
        self.face_aligner = FaceAligner(config['image'], self.predictor)

    def convert_dataset(self, slice_limits, pid):
        """Process dataset records in the half-open range ``slice_limits``.

        Parameters
        ----------
        slice_limits : tuple of int
            ``(start, finish)`` index range into the dataset list.
        pid : int
            Worker id, used only for progress logging.

        Returns
        -------
        tuple
            ``(new_dataset, bad_records)``: the cleaned record dicts and a
            per-reason count of rejected records.
        """
        self.slice = slice_limits
        self.pid = pid
        dataset_folder = os.path.dirname(self.dataset_path)
        processed_dataset_path = self.config['general']['processed_dataset_path']
        with open(self.dataset_path) as f:
            dataset = json.load(f)[self.slice[0]: self.slice[1]]
        total = self.slice[1] - self.slice[0]
        new_dataset = []
        bad_records = {'age': 0, 'gender': 0, 'small_faces': 0}
        start_time = now()
        for ind, record in enumerate(dataset):
            if self.need_print(ind):
                ratio = ind / total
                # Linear ETA estimate; the 1e-9 guards against division by zero.
                eta = timedelta(seconds=(now() - start_time) * (1 - ratio) / (ratio + 1e-9))
                print(f'pid: {self.pid}, progress: {round(ratio*100, 1)}% {ind}/{total} images, eta={eta}')
            # BUG FIX: the original check was
            #   isinstance(g, float) and isinstance(g, int)
            # which no value can satisfy, so *every* record was rejected as
            # having a bad gender.  Accept either numeric type instead.
            if not isinstance(record['gender'], (float, int)):
                bad_records['gender'] += 1
                continue
            if not isinstance(record['age'], int) or not 0 < record['age'] <= 100:
                bad_records['age'] += 1
                continue
            # file_name is stored as a list; presumably the first entry is the
            # relative image path — TODO confirm against the dataset format.
            file_name = record['file_name'][0]
            image_path = os.path.join(dataset_folder, file_name)
            save_path = os.path.join(processed_dataset_path, file_name)
            if not os.path.exists(save_path):
                processed_image = self.convert_image(image_path)
                if processed_image is None:
                    bad_records['small_faces'] += 1
                    continue
                save_folder_path = os.path.dirname(os.path.abspath(save_path))
                if not os.path.exists(save_folder_path):
                    os.makedirs(save_folder_path)
                cv2.imwrite(save_path, processed_image)
            new_dataset.append({'file_name': file_name, 'gender': int(record['gender']), 'age': record['age']})
        print(f'pid: {self.pid}, total: {total} images, time={now() - start_time}')
        return new_dataset, bad_records

    def convert_image(self, image_path):
        """Return the aligned face crop from ``image_path``.

        Returns ``None`` when the file is unreadable, when zero or several
        faces are detected, or when the face is smaller than
        ``face_area_threshold``.
        """
        face_area_threshold = self.config['image']['face_area_threshold']
        image = cv2.imread(image_path, cv2.IMREAD_COLOR)
        if image is None:
            # Missing/corrupt file: previously this crashed in cvtColor;
            # treat it like a failed detection instead.
            return None
        gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        rects = self.detector(image, 2)  # upsample twice to catch smaller faces
        if len(rects) != 1:
            return None
        rect = rects[0]
        if get_area(rect) < face_area_threshold:
            return None
        return self.face_aligner.align(image, gray_image, rect)

    @staticmethod
    def need_print(ind):
        """Throttle progress logging: at 10, every 100th image below 1000, then every 1000th."""
        if ind == 0:
            return False
        elif ind == 10:
            return True
        elif ind < 1000:
            return ind % 100 == 0
        else:
            return ind % 1000 == 0

    @staticmethod
    def run_job(config, slice_limits, pid):
        """Worker-process entry point: build a Converter and run one slice."""
        converter = Converter(config)
        return converter.convert_dataset(slice_limits, pid)
class ConverterManager:
    """Fans the dataset conversion out over a pool of worker processes."""
    def __init__(self, config):
        self.config = config
        self.n_jobs = config['general']['n_jobs']
        self.dataset_path = config['general']['dataset_path']
        self.processed_dataset_path = self.config['general']['processed_dataset_path']
    def run(self):
        """Split the dataset into n_jobs contiguous slices, convert each in a
        separate process, merge the results and write dataset.json."""
        with open(self.dataset_path) as f:
            dataset_len = len(json.load(f))
        with ProcessPoolExecutor(max_workers=self.n_jobs) as executor:
            futures = list()
            # n_jobs + 1 evenly spaced indices define the slice boundaries.
            # NOTE(review): np.int was removed in NumPy 1.24; dtype=int is the
            # drop-in replacement when upgrading.
            subsets = np.linspace(0, dataset_len, self.n_jobs + 1, dtype=np.int)
            for ind in range(self.n_jobs):
                start = subsets[ind]
                finish = subsets[ind+1]
                print(f'pid: {ind}, slice: [{start}:{finish}]')
                futures.append(executor.submit(Converter.run_job, self.config, (start, finish), ind))
            new_dataset = list()
            bad_records = {'age': 0, 'gender': 0, 'small_faces': 0}
            for job in as_completed(futures):
                # Each job returns (records, per-reason rejection counts).
                new_dataset += job.result()[0]
                bad_records = {k: bad_records[k] + v for k, v in job.result()[1].items()}
        # NOTE(review): assumes processed_dataset_path already exists — the
        # workers only create sub-folders beneath it.  Confirm before first run.
        with open(os.path.join(self.processed_dataset_path, 'dataset.json'), 'w') as f:
            json.dump(new_dataset, f)
        self._save_dataset_config()
        print('Records with incorrect age: ', bad_records['age'])
        print('Records with incorrect gender: ', bad_records['gender'])
        print('Records with small faces: ', bad_records['small_faces'])
        print('Total records transformed %d/%d' % (len(new_dataset), dataset_len))
    def _save_dataset_config(self):
        """Persist the configuration used for this conversion next to its outputs."""
        with open(os.path.join(self.processed_dataset_path, 'config.yaml'), 'w') as file:
            yaml.dump(self.config, file, default_flow_style=False)
| [
"os.path.exists",
"cv2.imwrite",
"age_gender.preprocess.face_aligner.FaceAligner",
"os.makedirs",
"yaml.dump",
"time.monotonic",
"os.path.join",
"dlib.shape_predictor",
"os.path.abspath",
"concurrent.futures.as_completed",
"os.path.dirname",
"dlib.get_frontal_face_detector",
"numpy.linspace"... | [((665, 697), 'dlib.get_frontal_face_detector', 'dlib.get_frontal_face_detector', ([], {}), '()\n', (695, 697), False, 'import dlib\n'), ((723, 765), 'dlib.shape_predictor', 'dlib.shape_predictor', (['self.shape_predictor'], {}), '(self.shape_predictor)\n', (743, 765), False, 'import dlib\n'), ((794, 838), 'age_gender.preprocess.face_aligner.FaceAligner', 'FaceAligner', (["config['image']", 'self.predictor'], {}), "(config['image'], self.predictor)\n", (805, 838), False, 'from age_gender.preprocess.face_aligner import FaceAligner\n'), ((972, 1006), 'os.path.dirname', 'os.path.dirname', (['self.dataset_path'], {}), '(self.dataset_path)\n', (987, 1006), False, 'import os\n'), ((1354, 1359), 'time.monotonic', 'now', ([], {}), '()\n', (1357, 1359), True, 'from time import monotonic as now\n'), ((3143, 3183), 'cv2.imread', 'cv2.imread', (['image_path', 'cv2.IMREAD_COLOR'], {}), '(image_path, cv2.IMREAD_COLOR)\n', (3153, 3183), False, 'import cv2\n'), ((3205, 3244), 'cv2.cvtColor', 'cv2.cvtColor', (['image', 'cv2.COLOR_BGR2GRAY'], {}), '(image, cv2.COLOR_BGR2GRAY)\n', (3217, 3244), False, 'import cv2\n'), ((2146, 2185), 'os.path.join', 'os.path.join', (['dataset_folder', 'file_name'], {}), '(dataset_folder, file_name)\n', (2158, 2185), False, 'import os\n'), ((2210, 2257), 'os.path.join', 'os.path.join', (['processed_dataset_path', 'file_name'], {}), '(processed_dataset_path, file_name)\n', (2222, 2257), False, 'import os\n'), ((4406, 4450), 'concurrent.futures.ProcessPoolExecutor', 'ProcessPoolExecutor', ([], {'max_workers': 'self.n_jobs'}), '(max_workers=self.n_jobs)\n', (4425, 4450), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((4515, 4573), 'numpy.linspace', 'np.linspace', (['(0)', 'dataset_len', '(self.n_jobs + 1)'], {'dtype': 'np.int'}), '(0, dataset_len, self.n_jobs + 1, dtype=np.int)\n', (4526, 4573), True, 'import numpy as np\n'), ((4985, 5006), 'concurrent.futures.as_completed', 'as_completed', 
(['futures'], {}), '(futures)\n', (4997, 5006), False, 'from concurrent.futures import ProcessPoolExecutor, as_completed\n'), ((5246, 5271), 'json.dump', 'json.dump', (['new_dataset', 'f'], {}), '(new_dataset, f)\n', (5255, 5271), False, 'import json\n'), ((5740, 5794), 'yaml.dump', 'yaml.dump', (['self.config', 'file'], {'default_flow_style': '(False)'}), '(self.config, file, default_flow_style=False)\n', (5749, 5794), False, 'import yaml\n'), ((1154, 1166), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1163, 1166), False, 'import json\n'), ((2277, 2302), 'os.path.exists', 'os.path.exists', (['save_path'], {}), '(save_path)\n', (2291, 2302), False, 'import os\n'), ((4378, 4390), 'json.load', 'json.load', (['f'], {}), '(f)\n', (4387, 4390), False, 'import json\n'), ((5164, 5221), 'os.path.join', 'os.path.join', (['self.processed_dataset_path', '"""dataset.json"""'], {}), "(self.processed_dataset_path, 'dataset.json')\n", (5176, 5221), False, 'import os\n'), ((5656, 5712), 'os.path.join', 'os.path.join', (['self.processed_dataset_path', '"""config.yaml"""'], {}), "(self.processed_dataset_path, 'config.yaml')\n", (5668, 5712), False, 'import os\n'), ((2734, 2773), 'cv2.imwrite', 'cv2.imwrite', (['save_path', 'processed_image'], {}), '(save_path, processed_image)\n', (2745, 2773), False, 'import cv2\n'), ((2571, 2597), 'os.path.abspath', 'os.path.abspath', (['save_path'], {}), '(save_path)\n', (2586, 2597), False, 'import os\n'), ((2626, 2658), 'os.path.exists', 'os.path.exists', (['save_folder_path'], {}), '(save_folder_path)\n', (2640, 2658), False, 'import os\n'), ((2684, 2713), 'os.makedirs', 'os.makedirs', (['save_folder_path'], {}), '(save_folder_path)\n', (2695, 2713), False, 'import os\n'), ((2948, 2953), 'time.monotonic', 'now', ([], {}), '()\n', (2951, 2953), True, 'from time import monotonic as now\n'), ((1521, 1526), 'time.monotonic', 'now', ([], {}), '()\n', (1524, 1526), True, 'from time import monotonic as now\n')] |
from __future__ import division, print_function, absolute_import
import numpy as np
from scipy.sparse.sputils import isshape, isintlike
from scipy.sparse import isspmatrix
__all__ = ['LinearOperator', 'aslinearoperator']
class LinearOperator(object):
    """Common interface for performing matrix vector products
    Many iterative methods (e.g. cg, gmres) do not need to know the
    individual entries of a matrix to solve a linear system A*x=b.
    Such solvers only require the computation of matrix vector
    products, A*v where v is a dense vector.  This class serves as
    an abstract interface between iterative solvers and matrix-like
    objects.
    Parameters
    ----------
    shape : tuple
        Matrix dimensions (M,N)
    matvec : callable f(v)
        Returns A * v.
    Other Parameters
    ----------------
    rmatvec : callable f(v)
        Returns A^H * v, where A^H is the conjugate transpose of A.
    matmat : callable f(V)
        Returns A * V, where V is a dense matrix with dimensions (N,K).
    dtype : dtype
        Data type of the matrix.
    Attributes
    ----------
    args : tuple
        For linear operators describing products etc. of other linear
        operators, the operands of the binary operation.
    See Also
    --------
    aslinearoperator : Construct LinearOperators
    Notes
    -----
    The user-defined matvec() function must properly handle the case
    where v has shape (N,) as well as the (N,1) case.  The shape of
    the return type is handled internally by LinearOperator.
    LinearOperator instances can also be multiplied, added with each
    other and exponentiated, to produce a new linear operator.
    Examples
    --------
    >>> import numpy as np
    >>> from scipy.sparse.linalg import LinearOperator
    >>> def mv(v):
    ...     return np.array([2*v[0], 3*v[1]])
    ...
    >>> A = LinearOperator((2,2), matvec=mv)
    >>> A
    <2x2 LinearOperator with unspecified dtype>
    >>> A.matvec(np.ones(2))
    array([ 2.,  3.])
    >>> A * np.ones(2)
    array([ 2.,  3.])
    """
    def __init__(self, shape, matvec, rmatvec=None, matmat=None, dtype=None):
        # Accept any 2-sequence for shape but store it as a plain tuple.
        shape = tuple(shape)
        if not isshape(shape):
            raise ValueError('invalid shape')
        self.shape = shape
        self._matvec = matvec
        self.args = ()
        if rmatvec is None:
            # Placeholder so calling rmatvec raises a clear error rather
            # than an AttributeError.
            def rmatvec(v):
                raise NotImplementedError('rmatvec is not defined')
            self.rmatvec = rmatvec
        else:
            self.rmatvec = rmatvec
        if matmat is not None:
            # matvec each column of V
            self._matmat = matmat
        if dtype is not None:
            self.dtype = np.dtype(dtype)
    def _matmat(self, X):
        """Default matrix-matrix multiplication handler. Falls back on
        the user-defined matvec() routine, which is always provided.
        """
        return np.hstack([self.matvec(col.reshape(-1,1)) for col in X.T])
    def matvec(self, x):
        """Matrix-vector multiplication
        Performs the operation y=A*x where A is an MxN linear
        operator and x is a column vector or rank-1 array.
        Parameters
        ----------
        x : {matrix, ndarray}
            An array with shape (N,) or (N,1).
        Returns
        -------
        y : {matrix, ndarray}
            A matrix or ndarray with shape (M,) or (M,1) depending
            on the type and shape of the x argument.
        Notes
        -----
        This matvec wraps the user-specified matvec routine to ensure that
        y has the correct shape and type.
        """
        x = np.asanyarray(x)
        M,N = self.shape
        if x.shape != (N,) and x.shape != (N,1):
            raise ValueError('dimension mismatch')
        y = self._matvec(x)
        # Mirror the container type of the input (np.matrix vs ndarray).
        if isinstance(x, np.matrix):
            y = np.asmatrix(y)
        else:
            y = np.asarray(y)
        # Restore the caller's shape convention: (M,) for 1-D input,
        # (M,1) for column input.
        if x.ndim == 1:
            y = y.reshape(M)
        elif x.ndim == 2:
            y = y.reshape(M,1)
        else:
            raise ValueError('invalid shape returned by user-defined matvec()')
        return y
    def matmat(self, X):
        """Matrix-matrix multiplication
        Performs the operation y=A*X where A is an MxN linear
        operator and X dense N*K matrix or ndarray.
        Parameters
        ----------
        X : {matrix, ndarray}
            An array with shape (N,K).
        Returns
        -------
        Y : {matrix, ndarray}
            A matrix or ndarray with shape (M,K) depending on
            the type of the X argument.
        Notes
        -----
        This matmat wraps any user-specified matmat routine to ensure that
        y has the correct type.
        """
        X = np.asanyarray(X)
        if X.ndim != 2:
            raise ValueError('expected rank-2 ndarray or matrix')
        M,N = self.shape
        if X.shape[0] != N:
            raise ValueError('dimension mismatch')
        Y = self._matmat(X)
        if isinstance(Y, np.matrix):
            Y = np.asmatrix(Y)
        return Y
    def __call__(self, x):
        return self*x
    def __mul__(self, x):
        # Dispatch on the right operand: operator -> composition,
        # scalar -> scaling, array-like -> matvec/matmat.
        if isinstance(x, LinearOperator):
            return _ProductLinearOperator(self, x)
        elif np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            x = np.asarray(x)
            if x.ndim == 1 or x.ndim == 2 and x.shape[1] == 1:
                return self.matvec(x)
            elif x.ndim == 2:
                return self.matmat(x)
            else:
                raise ValueError('expected rank-1 or rank-2 array or matrix')
    def dot(self, other):
        # modeled after scipy.sparse.base.dot
        return self * other
    def __rmul__(self, x):
        # Only scalar * operator is supported from the left.
        if np.isscalar(x):
            return _ScaledLinearOperator(self, x)
        else:
            return NotImplemented
    def __pow__(self, p):
        if np.isscalar(p):
            return _PowerLinearOperator(self, p)
        else:
            return NotImplemented
    def __add__(self, x):
        if isinstance(x, LinearOperator):
            return _SumLinearOperator(self, x)
        else:
            return NotImplemented
    def __neg__(self):
        return _ScaledLinearOperator(self, -1)
    def __sub__(self, x):
        return self.__add__(-x)
    def __repr__(self):
        M,N = self.shape
        if hasattr(self,'dtype'):
            dt = 'dtype=' + str(self.dtype)
        else:
            dt = 'unspecified dtype'
        return '<%dx%d %s with %s>' % (M, N, self.__class__.__name__, dt)
def _get_dtype(operators, dtypes=[]):
for obj in operators:
if obj is not None and hasattr(obj, 'dtype'):
dtypes.append(obj.dtype)
return np.find_common_type(dtypes, [])
class _SumLinearOperator(LinearOperator):
    """Operator representing A + B for two equally-shaped LinearOperators."""
    def __init__(self, A, B):
        if not isinstance(A, LinearOperator) or \
                not isinstance(B, LinearOperator):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape != B.shape:
            raise ValueError('shape mismatch')
        # The base __init__ is handed this instance's *bound* methods; they
        # dispatch through self.args, which is assigned immediately after.
        super(_SumLinearOperator, self).__init__(A.shape,
                self.matvec, self.rmatvec, self.matmat, _get_dtype([A,B]))
        self.args = (A, B)
    def matvec(self, x):
        return self.args[0].matvec(x) + self.args[1].matvec(x)
    def rmatvec(self, x):
        return self.args[0].rmatvec(x) + self.args[1].rmatvec(x)
    def matmat(self, x):
        return self.args[0].matmat(x) + self.args[1].matmat(x)
class _ProductLinearOperator(LinearOperator):
    """Operator representing the composition A * B."""
    def __init__(self, A, B):
        if not isinstance(A, LinearOperator) or \
                not isinstance(B, LinearOperator):
            raise ValueError('both operands have to be a LinearOperator')
        if A.shape[1] != B.shape[0]:
            raise ValueError('shape mismatch')
        # Result shape is (rows of A, cols of B); methods dispatch via self.args.
        super(_ProductLinearOperator, self).__init__((A.shape[0], B.shape[1]),
                self.matvec, self.rmatvec, self.matmat, _get_dtype([A,B]))
        self.args = (A, B)
    def matvec(self, x):
        return self.args[0].matvec(self.args[1].matvec(x))
    def rmatvec(self, x):
        # (A*B)^H = B^H * A^H, hence the reversed application order.
        return self.args[1].rmatvec(self.args[0].rmatvec(x))
    def matmat(self, x):
        return self.args[0].matmat(self.args[1].matmat(x))
class _ScaledLinearOperator(LinearOperator):
    """Operator representing alpha * A for a scalar alpha."""
    def __init__(self, A, alpha):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if not np.isscalar(alpha):
            raise ValueError('scalar expected as alpha')
        # type(alpha) participates in dtype resolution alongside A's dtype.
        super(_ScaledLinearOperator, self).__init__(A.shape,
                self.matvec, self.rmatvec, self.matmat,
                _get_dtype([A], [type(alpha)]))
        self.args = (A, alpha)
    def matvec(self, x):
        return self.args[1] * self.args[0].matvec(x)
    def rmatvec(self, x):
        # The adjoint of alpha*A is conj(alpha) * A^H.
        return np.conj(self.args[1]) * self.args[0].rmatvec(x)
    def matmat(self, x):
        return self.args[1] * self.args[0].matmat(x)
class _PowerLinearOperator(LinearOperator):
    """Operator representing A**p: p repeated applications of a square A."""
    def __init__(self, A, p):
        if not isinstance(A, LinearOperator):
            raise ValueError('LinearOperator expected as A')
        if A.shape[0] != A.shape[1]:
            raise ValueError('square LinearOperator expected as A')
        if not isintlike(p):
            raise ValueError('integer expected as p')
        super(_PowerLinearOperator, self).__init__(A.shape,
                self.matvec, self.rmatvec, self.matmat,
                _get_dtype([A]))
        self.args = (A, p)
    def _power(self, fun, x):
        # Apply fun p times; copy first so the caller's array is not mutated.
        res = np.array(x, copy=True)
        for i in range(self.args[1]):
            res = fun(res)
        return res
    def matvec(self, x):
        return self._power(self.args[0].matvec, x)
    def rmatvec(self, x):
        return self._power(self.args[0].rmatvec, x)
    def matmat(self, x):
        return self._power(self.args[0].matmat, x)
class MatrixLinearOperator(LinearOperator):
    """Wraps a concrete (dense or sparse) matrix A as a LinearOperator."""
    def __init__(self, A):
        super(MatrixLinearOperator, self).__init__(shape=A.shape,
                dtype=A.dtype, matvec=None, rmatvec=self.rmatvec)
        # Forward products directly to the matrix's own dot().
        self.matvec = A.dot
        self.matmat = A.dot
        self.__mul__ = A.dot
        self.A = A
        # Conjugate transpose is computed lazily on the first rmatvec call.
        self.A_conj = None
        self.args = (A,)
    def rmatvec(self, x):
        if self.A_conj is None:
            self.A_conj = self.A.T.conj()
        return self.A_conj.dot(x)
class IdentityOperator(LinearOperator):
    """The identity operator: every product returns its argument unchanged."""
    def __init__(self, shape, dtype):
        super(IdentityOperator, self).__init__(shape=shape, dtype=dtype,
                matvec=None, rmatvec=self.rmatvec)
    def matvec(self, x):
        return x
    def rmatvec(self, x):
        return x
    def matmat(self, x):
        return x
    def __mul__(self, x):
        return x
def aslinearoperator(A):
"""Return A as a LinearOperator.
'A' may be any of the following types:
- ndarray
- matrix
- sparse matrix (e.g. csr_matrix, lil_matrix, etc.)
- LinearOperator
- An object with .shape and .matvec attributes
See the LinearOperator documentation for additional information.
Examples
--------
>>> from scipy import matrix
>>> M = matrix( [[1,2,3],[4,5,6]], dtype='int32' )
>>> aslinearoperator( M )
<2x3 LinearOperator with dtype=int32>
"""
if isinstance(A, LinearOperator):
return A
elif isinstance(A, np.ndarray) or isinstance(A, np.matrix):
if A.ndim > 2:
raise ValueError('array must have rank <= 2')
A = np.atleast_2d(np.asarray(A))
return MatrixLinearOperator(A)
elif isspmatrix(A):
return MatrixLinearOperator(A)
else:
if hasattr(A, 'shape') and hasattr(A, 'matvec'):
rmatvec = None
dtype = None
if hasattr(A, 'rmatvec'):
rmatvec = A.rmatvec
if hasattr(A, 'dtype'):
dtype = A.dtype
return LinearOperator(A.shape, A.matvec,
rmatvec=rmatvec, dtype=dtype)
else:
raise TypeError('type not understood')
| [
"scipy.sparse.isspmatrix",
"numpy.isscalar",
"numpy.conj",
"numpy.asmatrix",
"numpy.asarray",
"scipy.sparse.sputils.isshape",
"numpy.asanyarray",
"numpy.find_common_type",
"numpy.array",
"scipy.sparse.sputils.isintlike",
"numpy.dtype"
] | [((6786, 6817), 'numpy.find_common_type', 'np.find_common_type', (['dtypes', '[]'], {}), '(dtypes, [])\n', (6805, 6817), True, 'import numpy as np\n'), ((3651, 3667), 'numpy.asanyarray', 'np.asanyarray', (['x'], {}), '(x)\n', (3664, 3667), True, 'import numpy as np\n'), ((4776, 4792), 'numpy.asanyarray', 'np.asanyarray', (['X'], {}), '(X)\n', (4789, 4792), True, 'import numpy as np\n'), ((5805, 5819), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (5816, 5819), True, 'import numpy as np\n'), ((5957, 5971), 'numpy.isscalar', 'np.isscalar', (['p'], {}), '(p)\n', (5968, 5971), True, 'import numpy as np\n'), ((9667, 9689), 'numpy.array', 'np.array', (['x'], {'copy': '(True)'}), '(x, copy=True)\n', (9675, 9689), True, 'import numpy as np\n'), ((2209, 2223), 'scipy.sparse.sputils.isshape', 'isshape', (['shape'], {}), '(shape)\n', (2216, 2223), False, 'from scipy.sparse.sputils import isshape, isintlike\n'), ((2721, 2736), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (2729, 2736), True, 'import numpy as np\n'), ((3878, 3892), 'numpy.asmatrix', 'np.asmatrix', (['y'], {}), '(y)\n', (3889, 3892), True, 'import numpy as np\n'), ((3923, 3936), 'numpy.asarray', 'np.asarray', (['y'], {}), '(y)\n', (3933, 3936), True, 'import numpy as np\n'), ((5073, 5087), 'numpy.asmatrix', 'np.asmatrix', (['Y'], {}), '(Y)\n', (5084, 5087), True, 'import numpy as np\n'), ((5289, 5303), 'numpy.isscalar', 'np.isscalar', (['x'], {}), '(x)\n', (5300, 5303), True, 'import numpy as np\n'), ((8554, 8572), 'numpy.isscalar', 'np.isscalar', (['alpha'], {}), '(alpha)\n', (8565, 8572), True, 'import numpy as np\n'), ((8948, 8969), 'numpy.conj', 'np.conj', (['self.args[1]'], {}), '(self.args[1])\n', (8955, 8969), True, 'import numpy as np\n'), ((9378, 9390), 'scipy.sparse.sputils.isintlike', 'isintlike', (['p'], {}), '(p)\n', (9387, 9390), False, 'from scipy.sparse.sputils import isshape, isintlike\n'), ((11706, 11719), 'scipy.sparse.isspmatrix', 'isspmatrix', (['A'], {}), '(A)\n', 
(11716, 11719), False, 'from scipy.sparse import isspmatrix\n'), ((5385, 5398), 'numpy.asarray', 'np.asarray', (['x'], {}), '(x)\n', (5395, 5398), True, 'import numpy as np\n'), ((11642, 11655), 'numpy.asarray', 'np.asarray', (['A'], {}), '(A)\n', (11652, 11655), True, 'import numpy as np\n')] |
"""
Various density standards.
"""
from numpy import array
# Visual density is typically used on grey patches. Take a reading and get
# the density values of the Red, Green, and Blue filters. If the difference
# between the highest and lowest value is less than or equal to the value
# below, return the density reading calculated against the ISO Visual spectral
# weighting curve. The X-Rite 500 uses a thresh of 0.05, the X-Rite i1 appears
# to use 0.08.
VISUAL_DENSITY_THRESH = 0.08
ANSI_STATUS_A_RED = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.37,
43.45,
100.00,
74.30,
40.18,
19.32,
7.94,
3.56,
1.46,
0.60,
0.24,
0.09,
0.04,
0.01,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_A_GREEN = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.04,
6.64,
60.53,
100.00,
80.54,
44.06,
16.63,
4.06,
0.58,
0.04,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_A_BLUE = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
4.00,
65.92,
100.00,
81.66,
41.69,
10.96,
0.79,
0.04,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_E_RED = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.01,
0.06,
0.45,
29.99,
100.00,
84.92,
54.95,
25.00,
10.00,
5.00,
1.50,
0.50,
0.30,
0.15,
0.05,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
ANSI_STATUS_E_GREEN = array((
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.01,
1.00,
5.00,
27.99,
68.08,
92.04,
100.00,
87.90,
66.07,
41.98,
21.98,
8.99,
2.50,
0.70,
0.09,
0.01,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00,
0.00
))
# 50-sample response curves (percent, peak-normalised to 100.0), all on the
# same sample grid as the preceding tables.  Long runs of zero samples are
# written as tuple repetition -- ``(0.00,) * n`` -- which builds exactly the
# same 50-element argument tuples as the fully written-out form.
ANSI_STATUS_E_BLUE = array(
    (0.00,) * 3
    + (0.01, 0.27, 2.70, 13.00, 29.99, 59.98, 82.04, 100.00,
       90.99, 76.03, 46.99, 17.99, 6.00, 0.80, 0.05, 0.01)
    + (0.00,) * 31
)
ANSI_STATUS_M_RED = array(
    (0.00,) * 28
    + (0.13, 30.13, 100.00, 79.25, 37.84, 17.86, 7.50,
       3.10, 1.26, 0.49, 0.19, 0.07, 0.03, 0.01)
    + (0.00,) * 8
)
ANSI_STATUS_M_GREEN = array(
    (0.00,) * 13
    + (0.01, 0.16, 1.43, 6.37, 18.71, 42.27, 74.47, 100.00,
       98.86, 65.77, 28.71, 8.22, 1.49, 0.17, 0.01)
    + (0.00,) * 22
)
ANSI_STATUS_M_BLUE = array(
    (0.00,) * 7
    + (0.13, 12.91, 42.85, 74.30, 100.00, 90.16,
       55.34, 22.03, 5.53, 0.98, 0.07)
    + (0.00,) * 32
)
ANSI_STATUS_T_RED = array(
    (0.00,) * 23
    + (0.06, 0.45, 29.99, 100.00, 84.92, 54.95, 25.00, 10.00,
       5.00, 1.50, 0.50, 0.30, 0.15, 0.05, 0.01)
    + (0.00,) * 12
)
ANSI_STATUS_T_GREEN = array(
    (0.00,) * 14
    + (1.00, 5.00, 27.99, 68.08, 92.04, 100.00, 87.90, 66.07,
       41.98, 21.98, 8.99, 2.50, 0.70, 0.09, 0.01)
    + (0.00,) * 21
)
ANSI_STATUS_T_BLUE = array(
    (0.00, 0.01, 0.02, 0.10, 0.30, 1.50, 6.00, 16.98, 39.99, 59.98,
     82.04, 93.97, 100.00, 97.05, 84.92, 65.01, 39.99, 17.99, 5.00,
     0.20, 0.04)
    + (0.00,) * 29
)
TYPE1 = array(
    (0.00, 0.00, 0.01, 0.04, 0.72, 28.84, 100.00, 28.84, 0.72, 0.04, 0.01)
    + (0.00,) * 39
)
TYPE2 = array(
    (0.01, 0.51, 19.05, 38.28, 57.54, 70.96, 82.41, 90.36, 97.27, 100.00,
     97.72, 89.33, 73.11, 55.34, 38.19, 22.44, 9.84, 2.52, 0.64, 0.16,
     0.01)
    + (0.00,) * 29
)
ISO_VISUAL = array(
    (0.00,) * 6
    + (0.01, 0.02, 0.08, 0.28, 0.65, 1.23, 2.22, 3.82, 6.58, 10.99,
       18.88, 32.58, 50.35, 66.83, 80.35, 90.57, 97.50, 100.00, 97.50,
       90.36, 79.80, 67.14, 53.83, 39.17, 27.10, 17.30, 10.30, 5.61,
       3.09, 1.54, 0.80, 0.42, 0.22, 0.11, 0.05, 0.03, 0.01, 0.01)
    + (0.00,) * 6
)
| [
"numpy.array"
] | [((523, 811), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.37, 43.45, \n 100.0, 74.3, 40.18, 19.32, 7.94, 3.56, 1.46, 0.6, 0.24, 0.09, 0.04, \n 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.37, 43.45,\n 100.0, 74.3, 40.18, 19.32, 7.94, 3.56, 1.46, 0.6, 0.24, 0.09, 0.04, \n 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (528, 811), False, 'from numpy import array\n'), ((1115, 1401), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.04, 6.64, 60.53, 100.0, 80.54, 44.06, 16.63, 4.06, 0.58, 0.04, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.04, 6.64, 60.53, 100.0, 80.54, 44.06, 16.63, 4.06, 0.58, \n 0.04, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (1120, 1401), False, 'from numpy import array\n'), ((1706, 1990), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 65.92, 100.0, 81.66, 41.69, \n 10.96, 0.79, 0.04, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 4.0, 65.92, 100.0, 81.66, \n 41.69, 10.96, 0.79, 0.04, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (1711, 1990), False, 'from numpy import array\n'), ((2296, 2582), 'numpy.array', 
'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.06, 0.45, 29.99, 100.0, \n 84.92, 54.95, 25.0, 10.0, 5.0, 1.5, 0.5, 0.3, 0.15, 0.05, 0.01, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.06, 0.45, 29.99, 100.0,\n 84.92, 54.95, 25.0, 10.0, 5.0, 1.5, 0.5, 0.3, 0.15, 0.05, 0.01, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (2301, 2582), False, 'from numpy import array\n'), ((2889, 3179), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 1.0,\n 5.0, 27.99, 68.08, 92.04, 100.0, 87.9, 66.07, 41.98, 21.98, 8.99, 2.5, \n 0.7, 0.09, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.01, 1.0, 5.0, 27.99, 68.08, 92.04, 100.0, 87.9, 66.07, 41.98, 21.98, \n 8.99, 2.5, 0.7, 0.09, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (2894, 3179), False, 'from numpy import array\n'), ((3483, 3776), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.01, 0.27, 2.7, 13.0, 29.99, 59.98, 82.04, 100.0, 90.99, \n 76.03, 46.99, 17.99, 6.0, 0.8, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.01, 0.27, 2.7, 13.0, 29.99, 59.98, 82.04, 100.0, \n 90.99, 76.03, 46.99, 17.99, 6.0, 0.8, 0.05, 0.01, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (3488, 3776), False, 'from numpy import array\n'), ((4077, 4365), 'numpy.array', 'array', 
(['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13, \n 30.13, 100.0, 79.25, 37.84, 17.86, 7.5, 3.1, 1.26, 0.49, 0.19, 0.07, \n 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.13, 30.13, 100.0, 79.25, 37.84, 17.86, 7.5, 3.1, 1.26, 0.49, 0.19, \n 0.07, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (4082, 4365), False, 'from numpy import array\n'), ((4669, 4962), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, \n 0.16, 1.43, 6.37, 18.71, 42.27, 74.47, 100.0, 98.86, 65.77, 28.71, 8.22,\n 1.49, 0.17, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.01, 0.16, 1.43, 6.37, 18.71, 42.27, 74.47, 100.0, 98.86, 65.77, 28.71,\n 8.22, 1.49, 0.17, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (4674, 4962), False, 'from numpy import array\n'), ((5262, 5551), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13, 12.91, 42.85, 74.3, 100.0, 90.16,\n 55.34, 22.03, 5.53, 0.98, 0.07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.13, 12.91, 42.85, 74.3, 100.0, \n 90.16, 55.34, 22.03, 5.53, 0.98, 0.07, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (5267, 5551), False, 'from numpy import array\n'), ((5854, 6140), 'numpy.array', 'array', (['(0.0, 
0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06, 0.45, 29.99, 100.0, 84.92,\n 54.95, 25.0, 10.0, 5.0, 1.5, 0.5, 0.3, 0.15, 0.05, 0.01, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.06, 0.45, 29.99, 100.0, \n 84.92, 54.95, 25.0, 10.0, 5.0, 1.5, 0.5, 0.3, 0.15, 0.05, 0.01, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (5859, 6140), False, 'from numpy import array\n'), ((6447, 6735), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0,\n 5.0, 27.99, 68.08, 92.04, 100.0, 87.9, 66.07, 41.98, 21.98, 8.99, 2.5, \n 0.7, 0.09, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 1.0, 5.0, 27.99, 68.08, 92.04, 100.0, 87.9, 66.07, 41.98, 21.98, 8.99, \n 2.5, 0.7, 0.09, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (6452, 6735), False, 'from numpy import array\n'), ((7041, 7338), 'numpy.array', 'array', (['(0.0, 0.01, 0.02, 0.1, 0.3, 1.5, 6.0, 16.98, 39.99, 59.98, 82.04, 93.97, \n 100.0, 97.05, 84.92, 65.01, 39.99, 17.99, 5.0, 0.2, 0.04, 0.0, 0.0, 0.0,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.01, 0.02, 0.1, 0.3, 1.5, 6.0, 16.98, 39.99, 59.98, 82.04, \n 93.97, 100.0, 97.05, 84.92, 65.01, 39.99, 17.99, 5.0, 0.2, 0.04, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (7046, 7338), False, 'from numpy import array\n'), ((7625, 7909), 'numpy.array', 'array', (['(0.0, 0.0, 
0.01, 0.04, 0.72, 28.84, 100.0, 28.84, 0.72, 0.04, 0.01, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.01, 0.04, 0.72, 28.84, 100.0, 28.84, 0.72, 0.04, 0.01, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (7630, 7909), False, 'from numpy import array\n'), ((8201, 8507), 'numpy.array', 'array', (['(0.01, 0.51, 19.05, 38.28, 57.54, 70.96, 82.41, 90.36, 97.27, 100.0, 97.72,\n 89.33, 73.11, 55.34, 38.19, 22.44, 9.84, 2.52, 0.64, 0.16, 0.01, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0)'], {}), '((0.01, 0.51, 19.05, 38.28, 57.54, 70.96, 82.41, 90.36, 97.27, 100.0, \n 97.72, 89.33, 73.11, 55.34, 38.19, 22.44, 9.84, 2.52, 0.64, 0.16, 0.01,\n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0))\n', (8206, 8507), False, 'from numpy import array\n'), ((8793, 9118), 'numpy.array', 'array', (['(0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.02, 0.08, 0.28, 0.65, 1.23, 2.22, \n 3.82, 6.58, 10.99, 18.88, 32.58, 50.35, 66.83, 80.35, 90.57, 97.5, \n 100.0, 97.5, 90.36, 79.8, 67.14, 53.83, 39.17, 27.1, 17.3, 10.3, 5.61, \n 3.09, 1.54, 0.8, 0.42, 0.22, 0.11, 0.05, 0.03, 0.01, 0.01, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0)'], {}), '((0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.01, 0.02, 0.08, 0.28, 0.65, 1.23, \n 2.22, 3.82, 6.58, 10.99, 18.88, 32.58, 50.35, 66.83, 80.35, 90.57, 97.5,\n 100.0, 97.5, 90.36, 79.8, 67.14, 53.83, 39.17, 27.1, 17.3, 10.3, 5.61, \n 3.09, 1.54, 0.8, 0.42, 0.22, 0.11, 0.05, 0.03, 0.01, 0.01, 0.0, 0.0, \n 0.0, 0.0, 0.0, 0.0))\n', (8798, 9118), False, 
'from numpy import array\n')] |
# Software License Agreement (BSD License)
#
# Copyright (c) 2011, <NAME>, Inc.
# Copyright (c) 2016, <NAME>.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of <NAME>, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import sensor_msgs.msg
import sys
class CvBridgeError(TypeError):
    """Raised by :class:`cv_bridge.CvBridge` conversion methods on failure."""
class CvBridge(object):
    """
    The CvBridge is an object that converts between OpenCV Images and ROS Image messages.

    .. doctest::
        :options: -ELLIPSIS, +NORMALIZE_WHITESPACE

        >>> import cv2
        >>> import numpy as np
        >>> from cv_bridge import CvBridge
        >>> br = CvBridge()
        >>> dtype, n_channels = br.encoding_as_cvtype2('8UC3')
        >>> im = np.ndarray(shape=(480, 640, n_channels), dtype=dtype)
        >>> msg = br.cv2_to_imgmsg(im)  # Convert the image to a message
        >>> im2 = br.imgmsg_to_cv2(msg)  # Convert the message to a new image
        >>> cmprsmsg = br.cv2_to_compressed_imgmsg(im)  # Convert the image to a compress message
        >>> im22 = br.compressed_imgmsg_to_cv2(msg)  # Convert the compress message to a new image
        >>> cv2.imwrite("this_was_a_message_briefly.png", im2)
    """

    def __init__(self):
        import cv2
        self.cvtype_to_name = {}
        # OpenCV depth constant -> numpy dtype name.
        self.cvdepth_to_numpy_depth = {cv2.CV_8U: 'uint8', cv2.CV_8S: 'int8', cv2.CV_16U: 'uint16',
                                       cv2.CV_16S: 'int16', cv2.CV_32S: 'int32', cv2.CV_32F: 'float32',
                                       cv2.CV_64F: 'float64'}
        # Build e.g. cv2.CV_8UC3 -> "8UC3" for every depth/channel pair.
        for t in ["8U", "8S", "16U", "16S", "32S", "32F", "64F"]:
            for c in [1, 2, 3, 4]:
                nm = "%sC%d" % (t, c)
                self.cvtype_to_name[getattr(cv2, "CV_%s" % nm)] = nm
        # Bidirectional numpy dtype name <-> OpenCV depth name mapping.
        self.numpy_type_to_cvtype = {'uint8': '8U', 'int8': '8S', 'uint16': '16U',
                                     'int16': '16S', 'int32': '32S', 'float32': '32F',
                                     'float64': '64F'}
        self.numpy_type_to_cvtype.update(dict((v, k) for (k, v) in self.numpy_type_to_cvtype.items()))

    def dtype_with_channels_to_cvtype2(self, dtype, n_channels):
        """Return the OpenCV type string (e.g. '8UC3') for a numpy dtype and a channel count."""
        return '%sC%d' % (self.numpy_type_to_cvtype[dtype.name], n_channels)

    def cvtype2_to_dtype_with_channels(self, cvtype):
        """Return a (numpy dtype name, channel count) pair for an OpenCV type constant.

        NOTE: this boost-free stub ignores ``cvtype`` and always reports
        ('uint8', 3); only 8-bit, 3-channel images are supported here.
        """
        return self.cvdepth_to_numpy_depth[0], 3
        # from cv_bridge.boost.cv_bridge_boost import CV_MAT_CNWrap, CV_MAT_DEPTHWrap
        # return self.cvdepth_to_numpy_depth[CV_MAT_DEPTHWrap(cvtype)], CV_MAT_CNWrap(cvtype)

    def encoding_to_cvtype2(self, encoding):
        """Return the OpenCV type constant for a ROS image encoding string.

        Only 'bgr8' and 'rgb8' are supported by this boost-free implementation.

        :raises CvBridgeError: if the encoding is not supported.
        """
        import cv2
        if encoding == 'bgr8' or encoding == 'rgb8':
            return cv2.CV_8UC3
        # BUG FIX: the original raised CvBridgeError(e) where ``e`` was never
        # defined, so callers saw a NameError instead of the intended error.
        raise CvBridgeError('Unsupported encoding %s' % encoding)
        # from cv_bridge.boost.cv_bridge_boost import getCvType
        # try:
        #     return getCvType(encoding)
        # except RuntimeError as e:
        #     raise CvBridgeError(e)

    def encoding_to_dtype_with_channels(self, encoding):
        """Return a (numpy dtype name, channel count) pair for a ROS encoding string."""
        return self.cvtype2_to_dtype_with_channels(self.encoding_to_cvtype2(encoding))

    def compressed_imgmsg_to_cv2(self, cmprs_img_msg, desired_encoding="passthrough"):
        """
        Convert a sensor_msgs::CompressedImage message to an OpenCV :cpp:type:`cv::Mat`.

        :param cmprs_img_msg: A :cpp:type:`sensor_msgs::CompressedImage` message
        :param desired_encoding: The encoding of the image data, one of the following strings:
           * ``"passthrough"``
           * one of the standard strings in sensor_msgs/image_encodings.h
        :rtype: :cpp:type:`cv::Mat`
        :raises CvBridgeError: when conversion is not possible.

        If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg.
        Otherwise desired_encoding must be one of the standard image encodings

        This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.

        If the image only has one channel, the shape has size 2 (width and height)
        """
        import cv2
        import numpy as np
        str_msg = cmprs_img_msg.data
        # Wrap the raw byte buffer without copying, then let OpenCV decode it.
        buf = np.ndarray(shape=(1, len(str_msg)),
                         dtype=np.uint8, buffer=cmprs_img_msg.data)
        im = cv2.imdecode(buf, cv2.IMREAD_ANYCOLOR)
        if desired_encoding == "passthrough":
            return im
        from cv_bridge.boost.cv_bridge_boost import cvtColor2
        try:
            res = cvtColor2(im, "bgr8", desired_encoding)
        except RuntimeError as e:
            raise CvBridgeError(e)
        return res

    def imgmsg_to_cv2(self, img_msg, desired_encoding="passthrough"):
        """
        Convert a sensor_msgs::Image message to an OpenCV :cpp:type:`cv::Mat`.

        :param img_msg: A :cpp:type:`sensor_msgs::Image` message
        :param desired_encoding: The encoding of the image data, one of the following strings:
           * ``"passthrough"``
           * one of the standard strings in sensor_msgs/image_encodings.h
        :rtype: :cpp:type:`cv::Mat`
        :raises CvBridgeError: when conversion is not possible.

        If desired_encoding is ``"passthrough"``, then the returned image has the same format as img_msg.
        Otherwise desired_encoding must be one of the standard image encodings

        This function returns an OpenCV :cpp:type:`cv::Mat` message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.

        If the image only has one channel, the shape has size 2 (width and height)
        """
        import cv2
        import numpy as np
        dtype, n_channels = self.encoding_to_dtype_with_channels(img_msg.encoding)
        dtype = np.dtype(dtype)
        dtype = dtype.newbyteorder('>' if img_msg.is_bigendian else '<')
        # Interpret the raw message buffer in place (no copy).
        if n_channels == 1:
            im = np.ndarray(shape=(img_msg.height, img_msg.width),
                            dtype=dtype, buffer=img_msg.data)
        else:
            im = np.ndarray(shape=(img_msg.height, img_msg.width, n_channels),
                            dtype=dtype, buffer=img_msg.data)
        # If the byte order differs between the message and the host, swap it.
        if img_msg.is_bigendian == (sys.byteorder == 'little'):
            im = im.byteswap().newbyteorder()
        if desired_encoding == "passthrough":
            return im
        from cv_bridge.boost.cv_bridge_boost import cvtColor2
        try:
            res = cvtColor2(im, img_msg.encoding, desired_encoding)
        except RuntimeError as e:
            raise CvBridgeError(e)
        return res

    def cv2_to_compressed_imgmsg(self, cvim, dst_format="jpg"):
        """
        Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::CompressedImage message.

        :param cvim: An OpenCV :cpp:type:`cv::Mat`
        :param dst_format: The format of the image data, one of the following strings:
           * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html
           * from http://docs.opencv.org/2.4/modules/highgui/doc/reading_and_writing_images_and_video.html#Mat imread(const string& filename, int flags)
           * bmp, dib
           * jpeg, jpg, jpe
           * jp2
           * png
           * pbm, pgm, ppm
           * sr, ras
           * tiff, tif
        :rtype: A sensor_msgs.msg.CompressedImage message
        :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``format``

        This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.
        """
        import cv2
        import numpy as np
        if not isinstance(cvim, (np.ndarray, np.generic)):
            raise TypeError('Your input type is not a numpy array')
        cmprs_img_msg = sensor_msgs.msg.CompressedImage()
        cmprs_img_msg.format = dst_format
        ext_format = '.' + dst_format
        try:
            # tobytes() replaces the numpy tostring() alias removed in NumPy 2.0.
            cmprs_img_msg.data = np.array(cv2.imencode(ext_format, cvim)[1]).tobytes()
        except RuntimeError as e:
            raise CvBridgeError(e)
        return cmprs_img_msg

    def cv2_to_imgmsg(self, cvim, encoding="passthrough"):
        """
        Convert an OpenCV :cpp:type:`cv::Mat` type to a ROS sensor_msgs::Image message.

        :param cvim: An OpenCV :cpp:type:`cv::Mat`
        :param encoding: The encoding of the image data, one of the following strings:
           * ``"passthrough"``
           * one of the standard strings in sensor_msgs/image_encodings.h
        :rtype: A sensor_msgs.msg.Image message
        :raises CvBridgeError: when the ``cvim`` has a type that is incompatible with ``encoding``

        If encoding is ``"passthrough"``, then the message has the same encoding as the image's OpenCV type.
        Otherwise desired_encoding must be one of the standard image encodings

        This function returns a sensor_msgs::Image message on success, or raises :exc:`cv_bridge.CvBridgeError` on failure.
        """
        import cv2
        import numpy as np
        if not isinstance(cvim, (np.ndarray, np.generic)):
            raise TypeError('Your input type is not a numpy array')
        img_msg = sensor_msgs.msg.Image()
        img_msg.height = cvim.shape[0]
        img_msg.width = cvim.shape[1]
        if len(cvim.shape) < 3:
            cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, 1)
        else:
            cv_type = self.dtype_with_channels_to_cvtype2(cvim.dtype, cvim.shape[2])
        if encoding == "passthrough":
            img_msg.encoding = cv_type
        else:
            img_msg.encoding = encoding
            # Verify that the supplied encoding is compatible with the type of the OpenCV image
            if self.cvtype_to_name[self.encoding_to_cvtype2(encoding)] != cv_type:
                raise CvBridgeError("encoding specified as %s, but image has incompatible type %s" % (encoding, cv_type))
        if cvim.dtype.byteorder == '>':
            img_msg.is_bigendian = True
        # tobytes() replaces the numpy tostring() alias removed in NumPy 2.0.
        img_msg.data = cvim.tobytes()
        img_msg.step = len(img_msg.data) // img_msg.height
        return img_msg
| [
"cv2.imencode",
"cv_bridge.boost.cv_bridge_boost.cvtColor2",
"numpy.ndarray",
"cv2.imdecode",
"numpy.dtype"
] | [((5776, 5814), 'cv2.imdecode', 'cv2.imdecode', (['buf', 'cv2.IMREAD_ANYCOLOR'], {}), '(buf, cv2.IMREAD_ANYCOLOR)\n', (5788, 5814), False, 'import cv2\n'), ((7204, 7219), 'numpy.dtype', 'np.dtype', (['dtype'], {}), '(dtype)\n', (7212, 7219), True, 'import numpy as np\n'), ((5979, 6018), 'cv_bridge.boost.cv_bridge_boost.cvtColor2', 'cvtColor2', (['im', '"""bgr8"""', 'desired_encoding'], {}), "(im, 'bgr8', desired_encoding)\n", (5988, 6018), False, 'from cv_bridge.boost.cv_bridge_boost import cvtColor2\n'), ((7338, 7426), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(img_msg.height, img_msg.width)', 'dtype': 'dtype', 'buffer': 'img_msg.data'}), '(shape=(img_msg.height, img_msg.width), dtype=dtype, buffer=\n img_msg.data)\n', (7348, 7426), True, 'import numpy as np\n'), ((7480, 7579), 'numpy.ndarray', 'np.ndarray', ([], {'shape': '(img_msg.height, img_msg.width, n_channels)', 'dtype': 'dtype', 'buffer': 'img_msg.data'}), '(shape=(img_msg.height, img_msg.width, n_channels), dtype=dtype,\n buffer=img_msg.data)\n', (7490, 7579), True, 'import numpy as np\n'), ((7953, 8002), 'cv_bridge.boost.cv_bridge_boost.cvtColor2', 'cvtColor2', (['im', 'img_msg.encoding', 'desired_encoding'], {}), '(im, img_msg.encoding, desired_encoding)\n', (7962, 8002), False, 'from cv_bridge.boost.cv_bridge_boost import cvtColor2\n'), ((9500, 9530), 'cv2.imencode', 'cv2.imencode', (['ext_format', 'cvim'], {}), '(ext_format, cvim)\n', (9512, 9530), False, 'import cv2\n')] |
#!/usr/bin/env python
import numpy
from geoh5 import kea
def main():
    """Round-trip a random 6-band image through two KEA files."""
    # Synthesise a random 6-band byte image to exercise the writer.
    pixels = numpy.random.randint(0, 256, (6, 100, 100)).astype('uint8')
    nbands, nrows, ncols = pixels.shape

    create_opts = {'width': ncols,
                   'height': nrows,
                   'count': nbands,
                   'dtype': pixels.dtype.name,
                   'compression': 2,
                   'no_data': 0,
                   'chunks': (25, 25),
                   'blocksize': 25}

    # Write every band of the synthetic image to disk.
    with kea.open('file-1.kea', 'w', **create_opts) as src:
        src.write(pixels, bands=range(1, nbands + 1))

    # Re-open the file as a fresh read-only object.
    with kea.open('file-1.kea', 'r') as src:
        # Read the first band.
        src.read(1)

        # Read bands [4, 3, 2] and get them back in that order.
        subset = src.read([4, 3, 2])

        copy_opts = {'width': src.width,
                     'height': src.height,
                     'count': 3,
                     'transform': src.transform,
                     'crs': src.crs,
                     'compression': 4,
                     'no_data': src.no_data[1],
                     'chunks': (50, 50),
                     'blocksize': 50,
                     'dtype': src.dtype}

        # Create a new output file; the first band of ``subset`` lands in
        # band 3 on disk, the second in band 2, the third in band 1.
        with kea.open('file-2.kea', 'w', **copy_opts) as dst:
            dst.write(subset, bands=[3, 2, 1])


if __name__ == '__main__':
    main()
| [
"numpy.random.randint",
"geoh5.kea.open"
] | [((500, 537), 'geoh5.kea.open', 'kea.open', (['"""file-1.kea"""', '"""w"""'], {}), "('file-1.kea', 'w', **kwargs)\n", (508, 537), False, 'from geoh5 import kea\n'), ((640, 667), 'geoh5.kea.open', 'kea.open', (['"""file-1.kea"""', '"""r"""'], {}), "('file-1.kea', 'r')\n", (648, 667), False, 'from geoh5 import kea\n'), ((106, 149), 'numpy.random.randint', 'numpy.random.randint', (['(0)', '(256)', '(6, 100, 100)'], {}), '(0, 256, (6, 100, 100))\n', (126, 149), False, 'import numpy\n'), ((1258, 1295), 'geoh5.kea.open', 'kea.open', (['"""file-2.kea"""', '"""w"""'], {}), "('file-2.kea', 'w', **kwargs)\n", (1266, 1295), False, 'from geoh5 import kea\n')] |
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for module search utility."""
import os
import unittest
import numpy as np
from absl.testing import flagsaver
import tensorflow.compat.v2 as tf
import tensorflow_datasets as tfds
from tensorflow_hub.tools.module_search import search
class ImageChannelMeanModel(tf.train.Checkpoint):
  """Trivial exportable model: per-channel mean of a batch of 224x224 images."""

  @tf.function(input_signature=[
      tf.TensorSpec(shape=(None, 224, 224, 3), dtype=tf.float32),
  ])
  def __call__(self, images):
    # Average over the two spatial axes, leaving (batch, channels).
    return tf.math.reduce_mean(images, axis=[1, 2])
def fake_image_dataset(*args, **kwargs):
  """Stand-in for tfds.load: 30 constant 32x32 RGB images, labels cycling 0-9."""
  def _examples():
    for i in range(30):
      yield {
          "image": np.ones(shape=(32, 32, 3), dtype=np.uint8),
          "label": i % 10,
      }

  return tf.data.Dataset.from_generator(
      _examples,
      output_types={"image": tf.uint8, "label": tf.int64},
      output_shapes={"image": (32, 32, 3), "label": ()},
  )
class SearchTest(tf.test.TestCase):
  """End-to-end smoke test for the module-search command-line tool."""

  def _create_image_models(self):
    # Export the same trivial model twice so search has two candidates.
    paths = []
    for name in ("model1", "model2"):
      export_dir = os.path.join(self.get_temp_dir(), name)
      tf.saved_model.save(ImageChannelMeanModel(), export_dir)
      paths.append(export_dir)
    return paths

  @unittest.mock.patch.object(search.utils.tfds, "load",
                              side_effect=fake_image_dataset)
  def test_run_e2e(self, mock_tfds_load):
    if not tf.executing_eagerly():
      self.skipTest("Test requires eager mode.")

    candidates = self._create_image_models()
    with flagsaver.flagsaver(dataset="cifar100", module=candidates):
      search.main([])


if __name__ == '__main__':
  tf.test.main()
| [
"tensorflow_hub.tools.module_search.search.main",
"numpy.ones",
"tensorflow.compat.v2.executing_eagerly",
"tensorflow.compat.v2.TensorSpec",
"tensorflow.compat.v2.test.main",
"unittest.mock.patch.object",
"tensorflow.compat.v2.math.reduce_mean",
"absl.testing.flagsaver.flagsaver"
] | [((1869, 1959), 'unittest.mock.patch.object', 'unittest.mock.patch.object', (['search.utils.tfds', '"""load"""'], {'side_effect': 'fake_image_dataset'}), "(search.utils.tfds, 'load', side_effect=\n fake_image_dataset)\n", (1895, 1959), False, 'import unittest\n'), ((2331, 2345), 'tensorflow.compat.v2.test.main', 'tf.test.main', ([], {}), '()\n', (2343, 2345), True, 'import tensorflow.compat.v2 as tf\n'), ((1136, 1171), 'tensorflow.compat.v2.math.reduce_mean', 'tf.math.reduce_mean', (['images', '[1, 2]'], {}), '(images, [1, 2])\n', (1155, 1171), True, 'import tensorflow.compat.v2 as tf\n'), ((2038, 2060), 'tensorflow.compat.v2.executing_eagerly', 'tf.executing_eagerly', ([], {}), '()\n', (2058, 2060), True, 'import tensorflow.compat.v2 as tf\n'), ((2198, 2253), 'absl.testing.flagsaver.flagsaver', 'flagsaver.flagsaver', ([], {'dataset': '"""cifar100"""', 'module': 'modules'}), "(dataset='cifar100', module=modules)\n", (2217, 2253), False, 'from absl.testing import flagsaver\n'), ((2284, 2299), 'tensorflow_hub.tools.module_search.search.main', 'search.main', (['[]'], {}), '([])\n', (2295, 2299), False, 'from tensorflow_hub.tools.module_search import search\n'), ((1030, 1088), 'tensorflow.compat.v2.TensorSpec', 'tf.TensorSpec', ([], {'shape': '(None, 224, 224, 3)', 'dtype': 'tf.float32'}), '(shape=(None, 224, 224, 3), dtype=tf.float32)\n', (1043, 1088), True, 'import tensorflow.compat.v2 as tf\n'), ((1312, 1354), 'numpy.ones', 'np.ones', ([], {'shape': '(32, 32, 3)', 'dtype': 'np.uint8'}), '(shape=(32, 32, 3), dtype=np.uint8)\n', (1319, 1354), True, 'import numpy as np\n')] |
"""Common utilities for Numba operations with groupby ops"""
import inspect
from typing import Any, Callable, Dict, Optional, Tuple
import numpy as np
from pandas._typing import Scalar
from pandas.compat._optional import import_optional_dependency
from pandas.core.util.numba_ import (
NUMBA_FUNC_CACHE,
NumbaUtilError,
get_jit_arguments,
jit_user_function,
)
def validate_udf(func: Callable) -> None:
    """
    Validate user defined function for ops when using Numba with groupby ops.

    The first signature arguments should include:

    def f(values, index, ...):
        ...

    Parameters
    ----------
    func : function, default False
        user defined function

    Returns
    -------
    None

    Raises
    ------
    NumbaUtilError
    """
    expected_args = ["values", "index"]
    min_number_args = len(expected_args)
    # Signature parameter names, in declaration order.
    actual_args = list(inspect.signature(func).parameters)
    # A too-short signature also fails this comparison: slicing a shorter
    # list yields fewer elements, which cannot equal ``expected_args``.
    if actual_args[:min_number_args] != expected_args:
        raise NumbaUtilError(
            f"The first {min_number_args} arguments to {func.__name__} must be "
            f"{expected_args}"
        )
def generate_numba_agg_func(
    args: Tuple,
    kwargs: Dict[str, Any],
    func: Callable[..., Scalar],
    engine_kwargs: Optional[Dict[str, bool]],
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]:
    """
    Generate a numba jitted agg function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby agg function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    args : tuple
        *args to be passed into the function
    kwargs : dict
        **kwargs to be passed into the function
    func : function
        function to be applied to each window and will be JITed
    engine_kwargs : dict
        dictionary of arguments to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    nopython, nogil, parallel = get_jit_arguments(engine_kwargs, kwargs)
    validate_udf(func)
    # Reuse a previously compiled kernel for the same user function, if any,
    # to avoid paying numba compilation cost twice.
    cache_key = (func, "groupby_agg")
    if cache_key in NUMBA_FUNC_CACHE:
        return NUMBA_FUNC_CACHE[cache_key]
    numba_func = jit_user_function(func, nopython, nogil, parallel)
    numba = import_optional_dependency("numba")
    # numba.prange parallelises a loop only under parallel=True; otherwise
    # fall back to a plain (sequential) range.
    if parallel:
        loop_range = numba.prange
    else:
        loop_range = range
    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_agg(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_groups: int,
        num_columns: int,
    ) -> np.ndarray:
        # One output row per group, one output column per aggregated column.
        result = np.empty((num_groups, num_columns))
        for i in loop_range(num_groups):
            # Index labels for the rows belonging to the i-th group
            # (begin/end hold that group's half-open row range).
            group_index = index[begin[i] : end[i]]
            for j in loop_range(num_columns):
                group = values[begin[i] : end[i], j]
                result[i, j] = numba_func(group, group_index, *args)
        return result
    return group_agg
def generate_numba_transform_func(
    args: Tuple,
    kwargs: Dict[str, Any],
    func: Callable[..., np.ndarray],
    engine_kwargs: Optional[Dict[str, bool]],
) -> Callable[[np.ndarray, np.ndarray, np.ndarray, np.ndarray, int, int], np.ndarray]:
    """
    Generate a numba jitted transform function specified by values from engine_kwargs.

    1. jit the user's function
    2. Return a groupby transform function with the jitted function inline

    Configurations specified in engine_kwargs apply to both the user's
    function _AND_ the groupby evaluation loop.

    Parameters
    ----------
    args : tuple
        *args to be passed into the function
    kwargs : dict
        **kwargs to be passed into the function
    func : function
        function to be applied to each window and will be JITed
    engine_kwargs : dict
        dictionary of arguments to be passed into numba.jit

    Returns
    -------
    Numba function
    """
    nopython, nogil, parallel = get_jit_arguments(engine_kwargs, kwargs)
    validate_udf(func)
    # Reuse a previously compiled kernel for the same user function, if any.
    cache_key = (func, "groupby_transform")
    if cache_key in NUMBA_FUNC_CACHE:
        return NUMBA_FUNC_CACHE[cache_key]
    numba_func = jit_user_function(func, nopython, nogil, parallel)
    numba = import_optional_dependency("numba")
    # numba.prange parallelises a loop only under parallel=True; otherwise
    # fall back to a plain (sequential) range.
    if parallel:
        loop_range = numba.prange
    else:
        loop_range = range
    @numba.jit(nopython=nopython, nogil=nogil, parallel=parallel)
    def group_transform(
        values: np.ndarray,
        index: np.ndarray,
        begin: np.ndarray,
        end: np.ndarray,
        num_groups: int,
        num_columns: int,
    ) -> np.ndarray:
        # Transform keeps the input's row count: one output row per input row.
        result = np.empty((len(values), num_columns))
        for i in loop_range(num_groups):
            # Index labels for the rows belonging to the i-th group
            # (begin/end hold that group's half-open row range).
            group_index = index[begin[i] : end[i]]
            for j in loop_range(num_columns):
                group = values[begin[i] : end[i], j]
                result[begin[i] : end[i], j] = numba_func(group, group_index, *args)
        return result
    return group_transform
| [
"pandas.core.util.numba_.get_jit_arguments",
"inspect.signature",
"pandas.core.util.numba_.jit_user_function",
"numpy.empty",
"pandas.core.util.numba_.NumbaUtilError",
"pandas.compat._optional.import_optional_dependency"
] | [((2170, 2210), 'pandas.core.util.numba_.get_jit_arguments', 'get_jit_arguments', (['engine_kwargs', 'kwargs'], {}), '(engine_kwargs, kwargs)\n', (2187, 2210), False, 'from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, NumbaUtilError, get_jit_arguments, jit_user_function\n'), ((2372, 2422), 'pandas.core.util.numba_.jit_user_function', 'jit_user_function', (['func', 'nopython', 'nogil', 'parallel'], {}), '(func, nopython, nogil, parallel)\n', (2389, 2422), False, 'from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, NumbaUtilError, get_jit_arguments, jit_user_function\n'), ((2435, 2470), 'pandas.compat._optional.import_optional_dependency', 'import_optional_dependency', (['"""numba"""'], {}), "('numba')\n", (2461, 2470), False, 'from pandas.compat._optional import import_optional_dependency\n'), ((4167, 4207), 'pandas.core.util.numba_.get_jit_arguments', 'get_jit_arguments', (['engine_kwargs', 'kwargs'], {}), '(engine_kwargs, kwargs)\n', (4184, 4207), False, 'from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, NumbaUtilError, get_jit_arguments, jit_user_function\n'), ((4375, 4425), 'pandas.core.util.numba_.jit_user_function', 'jit_user_function', (['func', 'nopython', 'nogil', 'parallel'], {}), '(func, nopython, nogil, parallel)\n', (4392, 4425), False, 'from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, NumbaUtilError, get_jit_arguments, jit_user_function\n'), ((4438, 4473), 'pandas.compat._optional.import_optional_dependency', 'import_optional_dependency', (['"""numba"""'], {}), "('numba')\n", (4464, 4473), False, 'from pandas.compat._optional import import_optional_dependency\n'), ((1068, 1177), 'pandas.core.util.numba_.NumbaUtilError', 'NumbaUtilError', (['f"""The first {min_number_args} arguments to {func.__name__} must be {expected_args}"""'], {}), "(\n f'The first {min_number_args} arguments to {func.__name__} must be {expected_args}'\n )\n", (1082, 1177), False, 'from pandas.core.util.numba_ import NUMBA_FUNC_CACHE, NumbaUtilError, 
get_jit_arguments, jit_user_function\n'), ((2841, 2876), 'numpy.empty', 'np.empty', (['(num_groups, num_columns)'], {}), '((num_groups, num_columns))\n', (2849, 2876), True, 'import numpy as np\n'), ((809, 832), 'inspect.signature', 'inspect.signature', (['func'], {}), '(func)\n', (826, 832), False, 'import inspect\n')] |
from pathlib import Path
from dateutil.parser import parse
from datetime import timedelta
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from helper_funcs import *
def preprocesamiento_casos():
    """Load every '*_global.csv' in the working directory into per-type
    time-series DataFrames (index: date, columns: countries).

    Returns a dict keyed by series type (e.g. 'confirmed', 'deaths'),
    each value a DataFrame produced by concatenating the per-country
    series returned by ``ts_since_two_per_country``.
    """
    ts_global = {}
    for csv_path in Path('.').glob('*_global.csv'):
        # File names look like '<type>_global.csv'; the prefix names the series.
        series_name = csv_path.name.split('_')[0]
        per_country = ts_since_two_per_country(open_global_ts(csv_path))
        ts_global[series_name] = pd.concat(per_country, axis=1)
    # Report the covered date range using the 'confirmed' series as reference.
    confirmed_index = ts_global['confirmed'].index
    first_case = confirmed_index[0].strftime('%Y-%m-%d')
    last_case = confirmed_index[-1].strftime('%Y-%m-%d')
    print(f'Series de tiempo Casos COVID para : {list(ts_global.keys())} desde {first_case} hasta {last_case}.\n')
    return ts_global
def preprocesamiento_medidas(medidas_xls, ts_global):
    """Parse the measures workbook (OxCGRT-style Excel file) into time series.

    Parameters
    ----------
    medidas_xls : pd.ExcelFile
        Workbook whose sheets hold one measure/index each; the last 4
        sheets are aggregate indices, the rest individual measures.
    ts_global : dict
        Case time series (needs a 'confirmed' DataFrame whose columns
        are country names) used to intersect the country sets.

    Returns
    -------
    (indices_ts, medidas_ts, countries_indices_int) : dicts keyed by
        index/measure name; the first two map to DataFrames
        (index: date, columns: countries), the last to country sets.
    """
    # Country-name replacements so the measures data matches the naming
    # convention used by the case time series.
    replace_cnames_indices = {
        'Slovak Republic' : 'Slovakia',
        'Czech Republic' : 'Czechia',
        'Kyrgyz Republic' : 'Kyrgyzstan',
        'Cape Verde': 'Cabo Verde',
        'Taiwan' : 'Taiwan*',
        'South Korea' : 'Korea, South',
        'United States' : 'US'
    }
    # Individual measures only (every sheet except the last 4 aggregates).
    medidas_ts = {}
    for indice in medidas_xls.sheet_names[:-4]:
        if 'flag' in indice:
            continue
        medida_ts = pd.read_excel(medidas_xls, indice)
        for actual, remplazo in replace_cnames_indices.items():
            medida_ts.loc[medida_ts['CountryName'] == actual, 'CountryName'] = remplazo
        # Drop the last 3 junk rows; then drop rows that have fewer than
        # ~1/5 of columns populated.
        medida_ts = medida_ts.drop(medida_ts.tail(3).index).dropna(thresh=1*len(medida_ts.columns)/5, axis=0)
        # Keep only countries present in both the measures and the case data.
        countries_ts = set(medida_ts.CountryName.unique()).intersection(set(ts_global['confirmed'].columns))
        medida_ts = medida_ts[medida_ts['CountryName'].isin(countries_ts)]
        index_name = indice.split('_')[-1]
        print(f'Para el indice {index_name: ^30} existen {len(countries_ts)} paises en Series de tiempo Medidas COVID y Casos COVID.')
        # Reshape to time-series layout (index: date, columns: countries).
        medida_ts = medida_ts.drop(labels='CountryCode', axis=1).set_index('CountryName').T
        medida_ts.index = pd.to_datetime([parse(idx) for idx in medida_ts.index])
        medida_ts.columns.name = ''
        medidas_ts[index_name] = medida_ts
    print('\n')
    # Aggregate indices only (the last 4 sheets); same pipeline as above,
    # but also record the per-index country intersection.
    indices_ts = {}
    countries_indices_int = {}
    for indice in medidas_xls.sheet_names[-4:]:
        indice_ts = pd.read_excel(medidas_xls, indice)
        for actual, remplazo in replace_cnames_indices.items():
            indice_ts.loc[indice_ts['CountryName'] == actual, 'CountryName'] = remplazo
        # Drop the last 3 junk rows; then drop mostly-empty rows.
        indice_ts = indice_ts.drop(indice_ts.tail(3).index).dropna(thresh=1*len(indice_ts.columns)/5, axis=0)
        # Keep only countries present in both sources.
        countries_ts = set(indice_ts.CountryName.unique()).intersection(set(ts_global['confirmed'].columns))
        indice_ts = indice_ts[indice_ts['CountryName'].isin(countries_ts)]
        index_name = indice.split('_')[-1]
        print(f'Para el indice {index_name: ^30} existen {len(countries_ts)} paises en Series de tiempo Medidas COVID y Casos COVID.')
        # Reshape to time-series layout (index: date, columns: countries).
        indice_ts = indice_ts.drop(labels='CountryCode', axis=1).set_index('CountryName').T
        indice_ts.index = pd.to_datetime([parse(idx) for idx in indice_ts.index])
        indice_ts.columns.name = ''
        indices_ts[index_name] = indice_ts
        countries_indices_int[index_name] = countries_ts
    return indices_ts, medidas_ts, countries_indices_int
def topics_df_common_countries(ts_global=None, wdi_ind=None, verbose=0):
    """Select WDI health-topic indicators and (optionally) the set of
    countries shared between the WDI data and the case time series.

    Returns the selected (Topic, Indicator Name) rows; when ``wdi_ind``
    is given, also returns the common-country set as a second value.
    """
    topics_indName = pd.read_csv('WDISeries.csv')[['Topic', 'Indicator Name']]
    health_topics = topics_indName[topics_indName['Topic'].str.contains("Health")]['Topic'].unique()
    # Hand-picked exclusions by position; assumes a fixed ordering of the
    # unique health topics in WDISeries.csv — TODO confirm against the file.
    health_topics = np.delete(health_topics, 5)
    health_topics = np.delete(health_topics, -1)
    health_topics = np.delete(health_topics, -1)
    health_topics = np.delete(health_topics, 0)
    if verbose:
        print('Se utilizan indicadores que tratan los siguientes temas:', health_topics)
    selected_topic_indicators = topics_indName[topics_indName['Topic'].isin(health_topics)]
    if wdi_ind is not None:
        # Countries that have at least one selected indicator AND appear in
        # the 'confirmed' case series.
        common_countries = set(wdi_ind[(wdi_ind['Indicator Name'].isin(selected_topic_indicators['Indicator Name']))]['Country Name'].unique()).intersection(set(ts_global['confirmed'].columns))
        if verbose:
            print('Existen', len(common_countries), 'paises en la interseccion de fuentes.\n')
        return selected_topic_indicators, common_countries
    return selected_topic_indicators
def preprocesamiento_wbd(ts_global):
    """Build a tidy table of 2019 WDI health indicators for the countries
    shared with the case time series.

    Reads 'WDIData.csv', forward-fills yearly columns so '2019' carries
    the most recent known value, and returns a DataFrame with columns
    ['Country Name', 'Indicator Name', '2019'].
    """
    wdi_ind = pd.read_csv('WDIData.csv')
    # Align country naming with the case data ('US' instead of 'United States').
    wdi_ind.loc[wdi_ind['Country Name'] == 'United States', 'Country Name'] = 'US'
    selected_topic_indicators, common_countries = topics_df_common_countries(ts_global=ts_global, wdi_ind=wdi_ind, verbose=1)
    health_indicators = selected_topic_indicators['Indicator Name']
    health_ind_df = wdi_ind[wdi_ind['Indicator Name'].isin(health_indicators) & wdi_ind['Country Name'].isin(common_countries)]
    # Forward-fill across year columns so missing recent years inherit the
    # last observed value.
    health_ind_df = health_ind_df.fillna(method='ffill', axis=1)
    # Coerce non-numeric leftovers (e.g. strings from metadata columns) to NaN.
    health_ind_df['2019'] = health_ind_df['2019'].apply(lambda x: pd.to_numeric(x, errors='coerce'))
    health_ind = health_ind_df[['Country Name', 'Indicator Name', '2019']].reset_index(drop=True)
    return health_ind
| [
"dateutil.parser.parse",
"pandas.read_csv",
"pathlib.Path",
"numpy.delete",
"pandas.to_numeric",
"pandas.read_excel",
"pandas.concat"
] | [((4232, 4259), 'numpy.delete', 'np.delete', (['health_topics', '(5)'], {}), '(health_topics, 5)\n', (4241, 4259), True, 'import numpy as np\n'), ((4280, 4308), 'numpy.delete', 'np.delete', (['health_topics', '(-1)'], {}), '(health_topics, -1)\n', (4289, 4308), True, 'import numpy as np\n'), ((4329, 4357), 'numpy.delete', 'np.delete', (['health_topics', '(-1)'], {}), '(health_topics, -1)\n', (4338, 4357), True, 'import numpy as np\n'), ((4378, 4405), 'numpy.delete', 'np.delete', (['health_topics', '(0)'], {}), '(health_topics, 0)\n', (4387, 4405), True, 'import numpy as np\n'), ((5101, 5127), 'pandas.read_csv', 'pd.read_csv', (['"""WDIData.csv"""'], {}), "('WDIData.csv')\n", (5112, 5127), True, 'import pandas as pd\n'), ((419, 440), 'pandas.concat', 'pd.concat', (['ts'], {'axis': '(1)'}), '(ts, axis=1)\n', (428, 440), True, 'import pandas as pd\n'), ((1344, 1378), 'pandas.read_excel', 'pd.read_excel', (['medidas_xls', 'indice'], {}), '(medidas_xls, indice)\n', (1357, 1378), True, 'import pandas as pd\n'), ((2719, 2753), 'pandas.read_excel', 'pd.read_excel', (['medidas_xls', 'indice'], {}), '(medidas_xls, indice)\n', (2732, 2753), True, 'import pandas as pd\n'), ((4052, 4080), 'pandas.read_csv', 'pd.read_csv', (['"""WDISeries.csv"""'], {}), "('WDISeries.csv')\n", (4063, 4080), True, 'import pandas as pd\n'), ((256, 265), 'pathlib.Path', 'Path', (['"""."""'], {}), "('.')\n", (260, 265), False, 'from pathlib import Path\n'), ((5671, 5704), 'pandas.to_numeric', 'pd.to_numeric', (['x'], {'errors': '"""coerce"""'}), "(x, errors='coerce')\n", (5684, 5704), True, 'import pandas as pd\n'), ((2392, 2402), 'dateutil.parser.parse', 'parse', (['idx'], {}), '(idx)\n', (2397, 2402), False, 'from dateutil.parser import parse\n'), ((3715, 3725), 'dateutil.parser.parse', 'parse', (['idx'], {}), '(idx)\n', (3720, 3725), False, 'from dateutil.parser import parse\n')] |
#
# Copyright (c) 2017-2019 AutoDeploy AI
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import, print_function
import sys
import json
import os
import pandas as pd
import numpy as np
FUNCTION_NAME_CLASSIFICATION = 'classification'
FUNCTION_NAME_REGRESSION = 'regression'
FUNCTION_NAME_CLUSTERING = 'clustering'
FUNCTION_NAME_UNKNOWN = 'unknown'
SUPPORTED_FUNCTION_NAMES = (FUNCTION_NAME_CLASSIFICATION, FUNCTION_NAME_REGRESSION, FUNCTION_NAME_CLUSTERING)
SUPPORTED_SERIALIZATIONS = ('pickle', 'joblib', 'spark', 'hdf5', 'xgboost', 'lightgbm', 'pmml', 'onnx', 'pt')
class BaseModel(object):
    """Abstract base for framework-specific model wrappers.

    Subclasses override the detection/metadata hooks (``is_support``,
    ``model_type``, ``model_version``, ``serialization``,
    ``evaluate_metrics``); this base supplies shared helpers that
    introspect sample test data into predictor/target descriptors.

    Fix over the original: ``evaluate_metrics_by_sklearn`` used a bare
    ``except:`` which also swallowed ``SystemExit``/``KeyboardInterrupt``;
    it now catches ``Exception`` only (still best-effort, returns {}).
    """

    def __init__(self, model):
        # The raw, framework-specific model object being wrapped.
        self.model = model

    def is_support(self):
        """Return True if this wrapper can handle ``self.model`` (subclass hook)."""
        raise NotImplementedError()

    def model_type(self):
        """Human-readable framework name, e.g. 'Scikit-learn' (subclass hook)."""
        raise NotImplementedError()

    def model_version(self):
        """Framework version string, or None when not applicable (subclass hook)."""
        raise NotImplementedError()

    def mining_function(self, y_test):
        """Mining function of the model; the base class cannot tell."""
        return FUNCTION_NAME_UNKNOWN

    def serialization(self):
        """Serialization format identifier, e.g. 'joblib' (subclass hook)."""
        raise NotImplementedError()

    def runtime(self):
        """Runtime tag for the current interpreter, e.g. 'Python37'."""
        return 'Python{major}{minor}'.format(major=sys.version_info[0], minor=sys.version_info[1])

    def algorithm(self):
        """Algorithm name; defaults to the wrapped object's class name."""
        return self.model.__class__.__name__

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Compute evaluation metrics on the given test data (subclass hook)."""
        raise NotImplementedError()

    def predictors(self, x_test, data_test):
        """Describe the model inputs from a sample of test data.

        Returns a list of dicts with 'name', 'sample' and 'type' keys
        ('shape' is added for tensor input of more than two dimensions).
        """
        if x_test is None:
            return []
        result = []
        if isinstance(x_test, np.ndarray) and x_test.ndim <= 2:
            # 1-D/2-D arrays are treated as tabular data with generated
            # column names x0, x1, ...
            x_test = pd.DataFrame(x_test)
            x_test.columns = ['x'+str(i) for i in range(0, len(x_test.columns))]
        x_test = self._series_to_dataframe(x_test)
        if isinstance(x_test, pd.DataFrame):
            # Round-trip the first row through JSON to obtain plain Python values.
            row = json.loads(x_test.iloc[0].to_json())
            cols = row.keys()
            for x in cols:
                result.append({
                    'name': x,
                    'sample': row[x],
                    'type': type(row[x]).__name__
                })
        else:  # numpy array with more than two dimensions
            row = x_test[0]
            result.append({
                'name': 'tensor_input',
                'sample': row.tolist(),
                'type': x_test.dtype.name,
                'shape': self._normalize_np_shape(x_test.shape)
            })
        return result

    def targets(self, y_test, data_test):
        """Describe the model targets from a sample of test data.

        Mirrors :meth:`predictors` (generated names y0, y1, ... for
        tabular data, a single 'tensor_target' entry otherwise).
        """
        if y_test is None:
            return []
        result = []
        if isinstance(y_test, np.ndarray) and y_test.ndim <= 2:
            y_test = pd.DataFrame(y_test)
            y_test.columns = ['y'+str(i) for i in range(0, len(y_test.columns))]
        y_test = self._series_to_dataframe(y_test)
        if isinstance(y_test, pd.DataFrame):
            row = json.loads(y_test.iloc[0].to_json())
            cols = row.keys()
            for x in cols:
                result.append({
                    'name': x,
                    'sample': row[x],
                    'type': type(row[x]).__name__
                })
        else:  # numpy array with more than two dimensions
            row = y_test[0]
            result.append({
                'name': 'tensor_target',
                'sample': row.tolist(),
                'type': y_test.dtype.name,
                'shape': self._normalize_np_shape(y_test.shape)
            })
        return result

    def outputs(self, y_test, data_test, **kwargs):
        """Describe model outputs; empty by default (subclass hook)."""
        return []

    @staticmethod
    def extract_major_minor_version(version):
        """Reduce '1.2.3' to '1.2'; shorter versions pass through unchanged."""
        result = version
        elements = version.split('.')
        if len(elements) > 2:
            result = '{major}.{minor}'.format(major=elements[0], minor=elements[1])
        return result

    @staticmethod
    def evaluate_metrics_by_sklearn(wrapped_model, x_test, y_test, input_function_name):
        """Best-effort accuracy / explained-variance via scikit-learn.

        Returns {} whenever metrics cannot be computed (missing data,
        unknown function name, prediction failure).
        """
        if x_test is None or y_test is None:
            return {}
        try:
            function_name = input_function_name if input_function_name else wrapped_model.mining_function(y_test)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                from sklearn.metrics import accuracy_score
                y_pred = wrapped_model.model.predict(x_test)
                accuracy = accuracy_score(y_test, y_pred)
                return {
                    'accuracy': accuracy
                }
            elif function_name == FUNCTION_NAME_REGRESSION:
                from sklearn.metrics import explained_variance_score
                y_pred = wrapped_model.model.predict(x_test)
                explained_variance = explained_variance_score(y_test, y_pred)
                return {
                    'explainedVariance': explained_variance
                }
            else:
                return {}
        except Exception:
            # Was a bare except; narrowed so SystemExit/KeyboardInterrupt propagate.
            return {}

    @staticmethod
    def _normalize_np_shape(shape):
        """Replace the leading (batch) dimension with None; 0/1-D shapes yield None."""
        result = None
        if shape is not None and len(shape) > 1:
            result = []
            for idx, d in enumerate(shape):
                if idx == 0:
                    result.append(None)
                else:
                    result.append(d)
        return result

    @staticmethod
    def _series_to_dataframe(data):
        """Promote a pandas Series to a single-column DataFrame."""
        if isinstance(data, pd.Series):
            return pd.DataFrame(data)
        return data

    def _test_data_to_ndarray(self, x_y_test, data_test):
        """Resolve test data (or a Spark DataFrame sample) to a numpy array."""
        data = self._to_dataframe(x_y_test, data_test)
        if isinstance(data, pd.DataFrame):
            return data.values
        return data

    @staticmethod
    def _to_ndarray(data):
        """Unwrap pandas containers to their underlying ndarray."""
        return data.values if isinstance(data, (pd.DataFrame, pd.Series)) else data

    @staticmethod
    def _to_dataframe(x_y_test, data_test):
        """Prefer x_y_test; otherwise take one row from the Spark DataFrame."""
        if x_y_test is None and data_test is not None:
            x_y_test = data_test.limit(1).toPandas()
        if isinstance(x_y_test, pd.Series):
            x_y_test = pd.DataFrame(x_y_test)
        return x_y_test

    def _infer_mining_function(self, y_test):
        """Heuristic: multi-column targets => classification; float dtype =>
        regression; anything else => classification."""
        if y_test is None:
            return FUNCTION_NAME_UNKNOWN
        y_test = self._to_ndarray(y_test)
        if y_test.ndim >= 2:
            return FUNCTION_NAME_CLASSIFICATION if y_test.shape[y_test.ndim - 1] > 1 else FUNCTION_NAME_REGRESSION
        # float numbers are treated as a regression problem
        return FUNCTION_NAME_REGRESSION if y_test.dtype.kind in 'fc' else FUNCTION_NAME_CLASSIFICATION

    @staticmethod
    def _compatible_shape(shape1, shape2):
        """True when shapes match, ignoring the leading (batch) dimension."""
        if len(shape1) != len(shape2):
            return False
        # could be tuple and list
        shape1 = list(shape1)
        shape2 = list(shape2)
        if len(shape1) > 1:
            return shape1[1:] == shape2[1:]
        return shape1 == shape2
class CustomModel(BaseModel):
    """Fallback wrapper for arbitrary user-supplied model objects."""

    def __init__(self, model):
        super(CustomModel, self).__init__(model)

    def is_support(self):
        """Accept anything that is not raw text/bytes content."""
        return not isinstance(self.model, (str, bytes, bytearray))

    def model_type(self):
        return 'Custom'

    def model_version(self):
        return 'unknown'

    def serialization(self):
        return 'pickle'

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        # No generic way to score an unknown model type.
        return {}
class PMMLModel(BaseModel):
    """Wrapper for PMML models evaluated through PyPMML."""
    def __init__(self, model):
        BaseModel.__init__(self, model)
        # Parsed PyPMML model; populated by is_support() on success.
        self.pmml_model = None
    def __del__(self):
        # Release PyPMML resources when the wrapper dies.
        # NOTE(review): Model.close() appears to be class-level, not
        # per-instance — confirm against the PyPMML API.
        if self.pmml_model:
            try:
                from pypmml import Model
                Model.close()
            except:
                pass
    def is_support(self):
        """Try to parse ``self.model`` as PMML (file path, XML string or
        bytes, or a readable object); return True on success."""
        try:
            from pypmml import Model
            model_content = self.model
            if hasattr(self.model, 'read') and callable(self.model.read):
                model_content = self.model.read()
            if isinstance(model_content, (bytes, bytearray)):
                model_content = model_content.decode('utf-8')
            if isinstance(model_content, str):
                # Check if a file path
                if os.path.exists(model_content):
                    self.pmml_model = Model.fromFile(model_content)
                else:
                    self.pmml_model = Model.fromString(model_content)
                return True
            else:
                Model.close()
                return False
        except Exception as e:
            return False
    def model_type(self):
        return 'PMML'
    def model_version(self):
        return None
    def mining_function(self, y_test):
        # PMML declares its function name explicitly; ignore y_test.
        return self.pmml_model.functionName
    def serialization(self):
        return 'pmml'
    def runtime(self):
        return 'PyPMML'
    def algorithm(self):
        return self.pmml_model.modelElement
    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort metrics via PyPMML predictions; returns {} on any
        failure or when there is no predicted-value output field."""
        prediction_col = self.get_prediction_col()
        if prediction_col is None:
            return {}
        # Convert spark df to Pandas
        if data_test is not None:
            try:
                label_col = self.pmml_model.targetName
                if not label_col:
                    return {}
                pandas_data_test = data_test.toPandas()
                y_test = pandas_data_test[label_col]
                x_test = pandas_data_test
            except:
                return {}
        if x_test is not None and y_test is not None:
            try:
                function_name = input_function_name if input_function_name else self.mining_function(y_test)
                if function_name == FUNCTION_NAME_CLASSIFICATION:
                    from sklearn.metrics import accuracy_score
                    y_pred = self.pmml_model.predict(x_test)
                    accuracy = accuracy_score(y_test, y_pred[prediction_col])
                    return {
                        'accuracy': accuracy
                    }
                elif function_name == FUNCTION_NAME_REGRESSION:
                    from sklearn.metrics import explained_variance_score
                    y_pred = self.pmml_model.predict(x_test)
                    explained_variance = explained_variance_score(y_test, y_pred[prediction_col])
                    return {
                        'explainedVariance': explained_variance
                    }
                else:
                    return {}
            except:
                return {}
        return {}
    def get_prediction_col(self):
        """Name of the first output field carrying the predicted value, or None."""
        output_fields = self.pmml_model.outputFields
        for x in output_fields:
            if x.feature == 'predictedValue':
                return x.name
        return None
    def predictors(self, x_test, data_test):
        """Describe inputs from the PMML mining schema; sample values come
        from the first row of the test data when available."""
        result = []
        row = None
        x_test = self._to_dataframe(x_test, data_test)
        if isinstance(x_test, pd.DataFrame):
            row = json.loads(x_test.iloc[0].to_json())
        for x in self.pmml_model.inputFields:
            result.append(({
                'name': x.name,
                'sample': row.get(x.name) if row is not None else None,
                'type': x.dataType
            }))
        return result
    def targets(self, y_test, data_test):
        """Describe targets from the PMML mining schema (see predictors)."""
        result = []
        row = None
        y_test = self._to_dataframe(y_test, data_test)
        if isinstance(y_test, pd.DataFrame):
            row = json.loads(y_test.iloc[0].to_json())
        for x in self.pmml_model.targetFields:
            result.append(({
                'name': x.name,
                'sample': row.get(x.name) if row is not None else None,
                'type': x.dataType
            }))
        return result
    def outputs(self, y_test, data_test, **kwargs):
        """Describe the PMML output fields (no sample values)."""
        result = []
        for x in self.pmml_model.outputFields:
            result.append(({
                'name': x.name,
                'type': x.dataType
            }))
        return result
class ONNXModel(BaseModel):
    """Wrapper for ONNX models evaluated through ONNX Runtime."""
    def __init__(self, model):
        super(ONNXModel, self).__init__(model)
        # Parsed onnx.ModelProto; set by is_support().
        self.onnx_model = None
        # Lazily created onnxruntime.InferenceSession (see _get_inference_session).
        self.sess = None
        # Cached algorithm name; computed once by algorithm().
        self._algorithm = None
    def is_support(self):
        """Try to load ``self.model`` as ONNX (ModelProto, serialized bytes,
        or a file path / readable object); return True on success."""
        try:
            import onnx
            if isinstance(self.model, onnx.ModelProto):
                self.onnx_model = self.model
                return True
            if isinstance(self.model, (bytes, bytearray)):
                onnx_model = onnx.load_model_from_string(self.model)
            else:
                # could be either readable or a file path
                onnx_model = onnx.load_model(self.model)
            onnx.checker.check_model(onnx_model)
            self.onnx_model = onnx_model
            return True
        except Exception:
            return False
    def model_type(self):
        return 'ONNX'
    def model_version(self):
        return None
    def mining_function(self, y_test):
        """Infer the mining function from the ONNX-ML operator if present,
        falling back to the y_test heuristic."""
        algorithm = self.algorithm()
        if algorithm is not None:
            if algorithm in ('LinearClassifier', 'SVMClassifier', 'TreeEnsembleClassifier'):
                return FUNCTION_NAME_CLASSIFICATION
            if algorithm in ('LinearRegressor', 'SVMRegressor', 'TreeEnsembleRegressor'):
                return FUNCTION_NAME_REGRESSION
        return self._infer_mining_function(y_test)
    def serialization(self):
        return 'onnx'
    def runtime(self):
        return 'ONNX Runtime'
    def algorithm(self):
        """Scan graph nodes once for a known ai.onnx.ml operator; a graph
        with no ONNX-ML nodes at all is labeled 'NeuralNetwork'."""
        if self._algorithm is None:
            use_onnx_ml = False
            if self.onnx_model is not None:
                graph = self.onnx_model.graph
                for node in graph.node:
                    if node.domain == 'ai.onnx.ml':
                        use_onnx_ml = True
                    if node.op_type in ('LinearClassifier', 'LinearRegressor', 'SVMClassifier', 'SVMRegressor',
                                        'TreeEnsembleClassifier', 'TreeEnsembleRegressor'):
                        self._algorithm = node.op_type
                        break
                if self._algorithm is None and not use_onnx_ml:
                    self._algorithm = 'NeuralNetwork'
        return self._algorithm
    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort metrics by running the model through ONNX Runtime.
        NOTE(review): when y_pred stays None (e.g. multiple inputs) this
        falls through and implicitly returns None rather than {} — confirm
        callers tolerate that."""
        if x_test is None or y_test is None:
            return {}
        try:
            function_name = input_function_name if input_function_name else self.mining_function(y_test)
            # convert to numpy array if not
            x_test = self._to_ndarray(x_test)
            y_test = self._to_ndarray(y_test)
            shape = y_test.shape
            if len(shape) > 1 and shape[1] > 1:
                # One-hot labels: reduce to class indices.
                y_test = np.argmax(y_test, axis=1)
            sess = self._get_inference_session()
            y_pred = None
            if function_name in (FUNCTION_NAME_CLASSIFICATION, FUNCTION_NAME_REGRESSION) and len(
                    sess.get_inputs()) == 1:
                input_name = sess.get_inputs()[0].name
                y_pred = sess.run([sess.get_outputs()[0].name], {input_name: x_test.astype(np.float32)})[0]
                y_pred = np.asarray(y_pred)
                shape = y_pred.shape
                if len(shape) > 1 and shape[1] > 1:
                    # Probability matrix: reduce to predicted class indices.
                    y_pred = np.argmax(y_pred, axis=1)
            if y_pred is not None:
                if function_name == FUNCTION_NAME_CLASSIFICATION:
                    from sklearn.metrics import accuracy_score
                    accuracy = accuracy_score(y_test, y_pred)
                    return {
                        'accuracy': accuracy
                    }
                elif function_name == FUNCTION_NAME_REGRESSION:
                    from sklearn.metrics import explained_variance_score
                    explained_variance = explained_variance_score(y_test, y_pred)
                    return {
                        'explainedVariance': explained_variance
                    }
                else:
                    return {}
        except Exception as e:
            return {}
    def predictors(self, x_test, data_test):
        """Describe graph inputs from the runtime session; attaches a sample
        from the test data when its shape is compatible."""
        result = []
        sess = self._get_inference_session()
        for x in sess.get_inputs():
            result.append({
                'name': x.name,
                'type': x.type,
                'shape': x.shape
            })
        # suppose there is only 1 tensor input
        data = self._test_data_to_ndarray(x_test, data_test)
        if data is not None and len(result) == 1:
            if self._compatible_shape(data.shape, result[0]['shape']):
                result[0]['sample'] = [data[0].tolist()]
        return result
    def targets(self, y_test, data_test):
        # ONNX graphs do not declare training targets.
        return []
    def outputs(self, y_test, data_test, **kwargs):
        """Describe graph outputs from the runtime session."""
        result = []
        sess = self._get_inference_session()
        for x in sess.get_outputs():
            result.append({
                'name': x.name,
                'type': x.type,
                'shape': x.shape
            })
        return result
    def _get_inference_session(self):
        """Create (once) and return the onnxruntime InferenceSession."""
        if self.sess is None:
            import onnxruntime as rt
            self.sess = rt.InferenceSession(self.onnx_model.SerializeToString())
        return self.sess
class SKLearnModel(BaseModel):
    """Wrapper for scikit-learn estimators.

    Fix over the original: ``is_support`` used a bare ``except:`` that
    swallowed everything including ``KeyboardInterrupt``; the only
    expected failure there is scikit-learn being absent, so it now
    catches ``ImportError``.
    """

    def __init__(self, model):
        BaseModel.__init__(self, model)

    def is_support(self):
        """Return True if the wrapped object is a scikit-learn estimator."""
        try:
            from sklearn.base import BaseEstimator
            return isinstance(self.model, BaseEstimator)
        except ImportError:
            # scikit-learn is not installed in this environment.
            return False

    def model_type(self):
        return 'Scikit-learn'

    def model_version(self):
        import sklearn
        return BaseModel.extract_major_minor_version(sklearn.__version__)

    def mining_function(self, y_test):
        """Use sklearn's estimator-type introspection, falling back to the
        y_test heuristic for unknown estimator kinds."""
        from sklearn.base import is_classifier, is_regressor
        if is_classifier(self.model):
            return FUNCTION_NAME_CLASSIFICATION
        if is_regressor(self.model):
            return FUNCTION_NAME_REGRESSION
        if getattr(self.model, "_estimator_type", None) == "clusterer":
            return FUNCTION_NAME_CLUSTERING
        return self._infer_mining_function(y_test)

    def serialization(self):
        return 'joblib'

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        return BaseModel.evaluate_metrics_by_sklearn(self, x_test, y_test, input_function_name)
class XGBoostModel(BaseModel):
    """Wrapper for XGBoost models (native Booster or sklearn-API estimators)."""
    def __init__(self, model):
        BaseModel.__init__(self, model)
    def is_support(self):
        """Return True for xgb.Booster or the sklearn-style XGB estimators."""
        try:
            import xgboost as xgb
            return isinstance(self.model, xgb.Booster) or \
                   isinstance(self.model, xgb.XGBClassifier) or \
                   isinstance(self.model, xgb.XGBRegressor)
        except:
            return False
    def is_sklearn_format(self):
        """True when the model uses the sklearn estimator API (not a raw Booster)."""
        import xgboost as xgb
        return isinstance(self.model, xgb.XGBClassifier) or isinstance(self.model, xgb.XGBRegressor)
    def model_type(self):
        return 'XGBoost'
    def model_version(self):
        import xgboost as xgb
        return BaseModel.extract_major_minor_version(xgb.__version__)
    def mining_function(self, y_test):
        """Estimator class decides for sklearn-format models; otherwise use
        the y_test heuristic."""
        import xgboost as xgb
        if isinstance(self.model, xgb.XGBClassifier):
            return FUNCTION_NAME_CLASSIFICATION
        if isinstance(self.model, xgb.XGBRegressor):
            return FUNCTION_NAME_REGRESSION
        return self._infer_mining_function(y_test)
    def serialization(self):
        # sklearn-format models are joblib-pickled; raw Boosters use the
        # native xgboost format.
        return 'joblib' if self.is_sklearn_format() else 'xgboost'
    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort metrics; sklearn-format models delegate to the shared
        sklearn path, raw Boosters are fed a DMatrix directly."""
        if x_test is None or y_test is None:
            return {}
        if self.is_sklearn_format():
            return BaseModel.evaluate_metrics_by_sklearn(self, x_test, y_test, input_function_name)
        try:
            import xgboost as xgb
            import pandas as pd
            import numpy as np
            function_name = input_function_name if input_function_name else self.mining_function(y_test)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                from sklearn.metrics import accuracy_score
                # Booster.predict returns class probabilities; argmax per row
                # converts them to class indices.
                y_pred = pd.DataFrame(self.model.predict(xgb.DMatrix(x_test))).apply(lambda x: np.argmax(np.array([x])),
                                                                                       axis=1)
                accuracy = accuracy_score(y_test, y_pred)
                return {
                    'accuracy': accuracy
                }
            elif function_name == FUNCTION_NAME_REGRESSION:
                from sklearn.metrics import explained_variance_score
                y_pred = pd.DataFrame(self.model.predict(xgb.DMatrix(x_test)))
                explained_variance = explained_variance_score(y_test, y_pred)
                return {
                    'explainedVariance': explained_variance
                }
            else:
                return {}
        except:
            return {}
class LightGBMModel(BaseModel):
    """Wrapper for LightGBM models (native Booster or sklearn-API estimators)."""
    def __init__(self, model):
        BaseModel.__init__(self, model)
    def is_support(self):
        """Return True for lgb.Booster or the sklearn-style LGBM estimators."""
        try:
            import lightgbm as lgb
            return isinstance(self.model, lgb.Booster) or \
                   isinstance(self.model, lgb.LGBMClassifier) or \
                   isinstance(self.model, lgb.LGBMRegressor)
        except:
            return False
    def is_sklearn_format(self):
        """True when the model uses the sklearn estimator API (not a raw Booster)."""
        import lightgbm as lgb
        return isinstance(self.model, lgb.LGBMClassifier) or isinstance(self.model, lgb.LGBMRegressor)
    def model_type(self):
        return 'LightGBM'
    def model_version(self):
        import lightgbm as lgb
        return BaseModel.extract_major_minor_version(lgb.__version__)
    def mining_function(self, y_test):
        """Estimator class decides for sklearn-format models; otherwise use
        the y_test heuristic."""
        import lightgbm as lgb
        if isinstance(self.model, lgb.LGBMClassifier):
            return FUNCTION_NAME_CLASSIFICATION
        if isinstance(self.model, lgb.LGBMRegressor):
            return FUNCTION_NAME_REGRESSION
        return self._infer_mining_function(y_test)
    def serialization(self):
        # sklearn-format models are joblib-pickled; raw Boosters use the
        # native lightgbm format.
        return 'joblib' if self.is_sklearn_format() else 'lightgbm'
    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort metrics; sklearn-format models delegate to the shared
        sklearn path, raw Boosters predict probabilities directly."""
        if x_test is None or y_test is None:
            return {}
        if self.is_sklearn_format():
            return BaseModel.evaluate_metrics_by_sklearn(self, x_test, y_test, input_function_name)
        try:
            import lightgbm as lgb
            import pandas as pd
            import numpy as np
            function_name = input_function_name if input_function_name else self.mining_function(y_test)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                from sklearn.metrics import accuracy_score
                # Booster.predict returns class probabilities; argmax per row
                # converts them to class indices.
                y_pred = pd.DataFrame(self.model.predict(x_test)).apply(lambda x: np.argmax(np.array([x])),
                                                                       axis=1)
                accuracy = accuracy_score(y_test, y_pred)
                return {
                    'accuracy': accuracy
                }
            elif function_name == FUNCTION_NAME_REGRESSION:
                from sklearn.metrics import explained_variance_score
                y_pred = pd.DataFrame(self.model.predict(x_test))
                explained_variance = explained_variance_score(y_test, y_pred)
                return {
                    'explainedVariance': explained_variance
                }
            else:
                return {}
        except:
            return {}
class KerasModel(BaseModel):
    """Wrapper for Keras models (standalone keras or tf.keras)."""
    def __init__(self, model):
        BaseModel.__init__(self, model)
        # True when the model is a tf.keras.Model (set by is_support()).
        self.tf_keras = False
    def is_support(self):
        """Return True for standalone-keras models, else fall back to tf.keras."""
        try:
            from keras.models import Model
            if isinstance(self.model, Model):
                return True
            return self._is_support_tf_keras()
        except:
            # standalone keras unavailable; tf.keras may still match.
            return self._is_support_tf_keras()
    def _is_support_tf_keras(self):
        """Detect a tf.keras.Model, recording the flavor in self.tf_keras."""
        try:
            import tensorflow as tf
            self.tf_keras = isinstance(self.model, tf.keras.Model)
            return self.tf_keras
        except:
            return False
    def model_type(self):
        return 'tf.Keras' if self.tf_keras else 'Keras'
    def model_version(self):
        if self.tf_keras:
            import tensorflow as tf
            return BaseModel.extract_major_minor_version(tf.keras.__version__)
        else:
            import keras
            return BaseModel.extract_major_minor_version(keras.__version__)
    def mining_function(self, y_test):
        # Keras graphs do not declare a mining function; infer from targets.
        return self._infer_mining_function(y_test)
    def serialization(self):
        return 'hdf5'
    def predictors(self, x_test, data_test):
        """Describe the model's input tensors; a sample row from x_test is
        attached when its shape is compatible with the tensor shape."""
        result = []
        row = None
        columns = None
        if x_test is not None:
            x_test = self._series_to_dataframe(x_test)
            shape = x_test.shape
            if isinstance(x_test, pd.DataFrame):
                row = x_test.iloc[0]
                columns = list(x_test.columns)
            else:
                row = x_test[0]
        for idx, x in enumerate(self.model.inputs):
            name = x.name
            if hasattr(self.model, 'input_names'):
                # Prefer the user-facing layer names when the model exposes them.
                name = self.model.input_names[idx]
            tensor_shape = self._normalize_tensor_shape(x.shape)
            result.append({
                'name': name,
                'sample': [row.tolist()] if row is not None and self._compatible_shape(tensor_shape, shape) else None,
                'type': np.dtype(x.dtype.as_numpy_dtype).name,
                'shape': tensor_shape
            })
            if columns is not None and result[-1]['sample'] is not None:
                result[-1]['columns'] = columns
        return result
    def targets(self, y_test, data_test):
        """Describe targets from the test data: per-column entries for
        tabular data, a single 'tensor_target' entry otherwise."""
        if y_test is None:
            return []
        result = []
        y_test = self._series_to_dataframe(y_test)
        if isinstance(y_test, pd.DataFrame):
            row = json.loads(y_test.iloc[0].to_json())
            cols = row.keys()
            for x in cols:
                result.append(({
                    'name': x,
                    'sample': row[x],
                    'type': type(row[x]).__name__
                }))
        else:
            row = y_test[0]
            result.append({
                'name': 'tensor_target',
                'sample': row.tolist(),
                'type': y_test.dtype.name,
                'shape': self._normalize_np_shape(y_test.shape)
            })
        return result
    def outputs(self, y_test, data_test, **kwargs):
        """Describe the model's output tensors."""
        result = []
        for idx, x in enumerate(self.model.outputs):
            name = x.name
            if hasattr(self.model, 'output_names'):
                name = self.model.output_names[idx]
            result.append(({
                'name': name,
                'type': np.dtype(x.dtype.as_numpy_dtype).name,
                'shape': self._normalize_tensor_shape(x.shape)
            }))
        return result
    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort metrics via model.predict; returns {} on any failure."""
        if x_test is None or y_test is None:
            return {}
        try:
            import numpy as np
            import pandas as pd
            function_name = input_function_name if input_function_name else self.mining_function(y_test)
            # convert to numpy array if not
            x_test = BaseModel._to_ndarray(x_test)
            y_test = BaseModel._to_ndarray(y_test)
            shape = y_test.shape
            if len(shape) > 1 and shape[1] > 1:
                # One-hot labels: reduce to class indices.
                y_test = np.argmax(y_test, axis=1)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                from sklearn.metrics import accuracy_score
                # predict() returns class probabilities; argmax per row gives
                # the predicted class index.
                y_pred = pd.DataFrame(self.model.predict(x_test)).apply(lambda x: np.argmax(np.array([x])),
                                                                       axis=1)
                accuracy = accuracy_score(y_test, y_pred)
                return {
                    'accuracy': accuracy
                }
            elif function_name == FUNCTION_NAME_REGRESSION:
                from sklearn.metrics import explained_variance_score
                y_pred = pd.DataFrame(self.model.predict(x_test))
                explained_variance = explained_variance_score(y_test, y_pred)
                return {
                    'explainedVariance': explained_variance
                }
            else:
                return {}
        except:
            return {}
    @staticmethod
    def _normalize_tensor_shape(tensor_shape):
        # Assumes TF1-style Dimension objects exposing .value — in TF2 shape
        # entries are plain ints/None without .value; TODO confirm the
        # supported TensorFlow version.
        return [d.value for d in tensor_shape]
class PytorchModel(BaseModel):
    """Adapter exposing a torch.nn.Module through the BaseModel metadata API."""

    def __init__(self, model):
        BaseModel.__init__(self, model)

    def is_support(self):
        """True when the wrapped object is a torch.nn.Module (False if torch is absent)."""
        try:
            from torch import nn
            return isinstance(self.model, nn.Module)
        except:
            return False

    def model_type(self):
        return 'Pytorch'

    def model_version(self):
        """Installed torch version, reduced to 'major.minor'."""
        import torch
        return BaseModel.extract_major_minor_version(torch.__version__)

    def mining_function(self, y_test):
        return self._infer_mining_function(y_test)

    def serialization(self):
        # state_dict saved via torch.save -> 'pt'.
        return 'pt'

    def predictors(self, x_test, data_test):
        """Describe the single input tensor: name, sample row, dtype and shape."""
        result = []
        columns = None
        shape = None
        sample = None
        if x_test is not None:
            x_test = self._series_to_dataframe(x_test)
            # NOTE(review): a pandas DataFrame has .dtypes, not .dtype — this
            # line presumably relies on _series_to_dataframe returning an
            # ndarray in practice; confirm against BaseModel.
            dtype = x_test.dtype
            shape = x_test.shape
            if isinstance(x_test, pd.DataFrame):
                row = x_test.iloc[0]
                columns = list(x_test.columns)
            else:
                row = x_test[0]
            sample = [row.tolist()]
        else:
            # No sample data: fall back to torch's default tensor dtype.
            import torch
            dtype = torch.Tensor(1).numpy().dtype
        result.append({
            'name': 'tensor_input',
            'sample': sample,
            'type': dtype.name,
            'shape': self._normalize_np_shape(shape)
        })
        if columns is not None and result[-1]['sample'] is not None:
            result[-1]['columns'] = columns
        return result

    def targets(self, y_test, data_test):
        """Describe the target tensor from the first row of y_test."""
        if y_test is None:
            return []
        result = []
        y_test = self._series_to_dataframe(y_test)
        if isinstance(y_test, pd.DataFrame):
            # NOTE(review): this branch yields a dict, but .tolist() below
            # only exists on array rows — verify the DataFrame path is reachable.
            row = json.loads(y_test.iloc[0].to_json())
        else:
            row = y_test[0]
        result.append({
            'name': 'tensor_target',
            'sample': row.tolist(),
            'type': y_test.dtype.name,
            'shape': self._normalize_np_shape(y_test.shape)
        })
        return result

    def outputs(self, y_test, data_test, **kwargs):
        """Infer the output tensor's dtype/shape by running one random forward pass."""
        result = []
        if 'x_test' in kwargs:
            x_test = self._to_ndarray(kwargs['x_test'])
            if x_test is not None:
                shape = list(x_test.shape)
                if len(shape) > 0 and shape[0] > 1:
                    # Probe with a single-sample batch.
                    shape[0] = 1
                import torch
                data = self.model(torch.randn(*shape)).data.numpy()
                result.append(({
                    'name': 'tensor_output',
                    'type': data.dtype.name,
                    'shape': self._normalize_np_shape(data.shape)
                }))
        return result

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort accuracy / explained-variance on the test split; {} on any failure."""
        if x_test is None or y_test is None:
            return {}
        try:
            import numpy as np
            import pandas as pd
            import torch
            function_name = input_function_name if input_function_name else self.mining_function(y_test)
            # convert to numpy array if not
            x_test = BaseModel._to_ndarray(x_test)
            y_test = BaseModel._to_ndarray(y_test)
            shape = y_test.shape
            if len(shape) > 1 and shape[1] > 1:
                # One-hot labels -> class indices.
                y_test = np.argmax(y_test, axis=1)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                from sklearn.metrics import accuracy_score
                # Cast inputs to torch's default dtype before the forward pass.
                dtype = torch.Tensor(1).dtype
                data = self.model(torch.from_numpy(x_test).type(dtype)).data.numpy()
                y_pred = pd.DataFrame(data).apply(lambda x: np.argmax(np.array([x])), axis=1)
                accuracy = accuracy_score(y_test, y_pred)
                return {
                    'accuracy': accuracy
                }
            elif function_name == FUNCTION_NAME_REGRESSION:
                from sklearn.metrics import explained_variance_score
                dtype = torch.Tensor(1).dtype
                data = self.model(torch.from_numpy(x_test).type(dtype)).data.numpy()
                y_pred = pd.DataFrame(data)
                explained_variance = explained_variance_score(y_test, y_pred)
                return {
                    'explainedVariance': explained_variance
                }
            else:
                return {}
        except:
            # Metrics are optional metadata; swallow all failures.
            return {}
class SparkModel(BaseModel):
    """Adapter exposing a pyspark.ml (Pipeline)Model through the BaseModel API."""

    def __init__(self, model):
        BaseModel.__init__(self, model)

    def is_support(self):
        """True when the wrapped object is a pyspark.ml Model (False if pyspark is absent)."""
        try:
            from pyspark.ml import Model
            return isinstance(self.model, Model)
        except:
            return False

    def is_pipeline_model(self):
        """True only for PipelineModel — the sole Spark flavor save/metadata supports."""
        try:
            from pyspark.ml import PipelineModel
            return isinstance(self.model, PipelineModel)
        except:
            return False

    def model_type(self):
        return 'Spark'

    def model_version(self):
        """Spark version of the (possibly newly created) SparkContext, as 'major.minor'."""
        from pyspark import SparkConf, SparkContext
        sc = SparkContext.getOrCreate(conf=SparkConf())
        return BaseModel.extract_major_minor_version(sc.version)

    def mining_function(self, y_test):
        return BaseModel.mining_function(self, y_test)

    def serialization(self):
        return 'spark'

    def evaluate_metrics(self, x_test, y_test, data_test, input_function_name):
        """Best-effort metrics computed by transforming data_test; {} on any failure."""
        if data_test is None:
            return {}
        try:
            prediction = self.model.transform(data_test)
            label_col = self.get_label_col()
            predict_col = self.get_prediction_col()
            function_name = input_function_name if input_function_name else self.mining_function(y_test)
            if function_name == FUNCTION_NAME_CLASSIFICATION:
                # Fraction of rows where prediction equals the label.
                accuracy = prediction.rdd.filter(
                    lambda x: x[label_col] == x[predict_col]).count() * 1.0 / prediction.count()
                return {
                    'accuracy': accuracy
                }
            elif function_name == FUNCTION_NAME_REGRESSION:
                # Explained variance = 1 - Var(residual) / Var(label).
                numerator = prediction.rdd.map(lambda x: x[label_col] - x[predict_col]).variance()
                denominator = prediction.rdd.map(lambda x: x[label_col]).variance()
                explained_variance = 1.0 - numerator / denominator
                return {
                    'explainedVariance': explained_variance
                }
            else:
                return {}
        except:
            return {}

    def predictors(self, x_test, data_test):
        """Describe every non-label column of data_test from its first row."""
        if data_test is None:
            return []
        row = json.loads(data_test.limit(1).toPandas().iloc[0].to_json())
        label_col = self.get_label_col()
        cols = row.keys()
        result = []
        for x in cols:
            if x != label_col:
                result.append(({
                    'name': x,
                    'sample': row[x],
                    'type': type(row[x]).__name__
                }))
        return result

    def targets(self, y_test, data_test):
        """Describe the label column of data_test from its first row."""
        if data_test is None:
            return []
        row = json.loads(data_test.limit(1).toPandas().iloc[0].to_json())
        label_col = self.get_label_col()
        cols = row.keys()
        result = []
        for x in cols:
            if x == label_col:
                result.append(({
                    'name': x,
                    'sample': row[x],
                    'type': type(row[x]).__name__
                }))
        return result

    def get_label_col(self):
        """Recover the label column name, probing stages via the private _call_java.

        Falls back to 'label' whenever introspection fails.
        """
        from pyspark.ml import PipelineModel
        if isinstance(self.model, PipelineModel):
            stages = self.model.stages
            label_col = None
            i = 0
            for x in reversed(stages):
                try:
                    # Last stage that knows its label column wins.
                    label_col = x._call_java('getLabelCol')
                    i += 1
                    break;
                except:
                    # NOTE(review): i is only bumped on success, so the slice
                    # below may re-scan stages already visited — confirm intent.
                    pass
            # find the first input column
            reversed_stages = stages[:]
            reversed_stages.reverse()
            for x in reversed_stages[i:]:
                try:
                    # Walk back through feature transformers to the original column.
                    if x._call_java('getOutputCol') == label_col:
                        label_col = x._call_java('getInputCol')
                except:
                    pass
            return 'label' if label_col is None else label_col
        else:
            label_col = None
            try:
                label_col = self.model._call_java('getLabelCol')
            except:
                label_col = 'label'
            return label_col

    def get_prediction_col(self):
        """Recover the prediction column name; falls back to 'prediction'."""
        from pyspark.ml import PipelineModel
        if isinstance(self.model, PipelineModel):
            stages = self.model.stages
            try:
                return stages[-1].getOutputCol()
            except:
                return 'prediction'
        else:
            try:
                return self.model.getPredictionCol()
            except:
                return 'prediction'
def _ndarray_or_dataframe(data):
    """Normalize test data: arrays/frames/series pass through unchanged,
    anything else is coerced to an ndarray, and None stays None."""
    if data is None:
        return None
    if isinstance(data, (np.ndarray, pd.DataFrame, pd.Series)):
        return data
    return np.asarray(data)
def get_model_metadata(model,
                       mining_function=None,
                       x_test=None,
                       y_test=None,
                       data_test=None,
                       features_json=None,
                       labels_json=None,
                       outputs_json=None,
                       source_object=None):
    """Wrap a model with the first matching adapter and collect its metadata dict.

    Raises ValueError when no adapter supports the model, when a Spark model
    is not a PipelineModel, or when mining_function is unrecognized. The
    features/labels/outputs JSON arguments, when given, override the
    adapter-derived descriptions.
    """
    # The order of such list is significant, do not change it!
    candidates = [LightGBMModel, XGBoostModel, SKLearnModel, SparkModel, KerasModel, PytorchModel,
                  PMMLModel, ONNXModel, CustomModel]
    wrapped_model = None
    for cls in candidates:
        wrapped_model = cls(model)
        if wrapped_model.is_support():
            break
    else:
        # No adapter claimed the model.
        wrapped_model = None

    if wrapped_model is None:
        raise ValueError('The model {class_name} is not recognized.'.format(class_name=model.__class__.__name__))

    if wrapped_model.model_type() == 'Spark' and not wrapped_model.is_pipeline_model():
        raise ValueError("The Spark model should be a PipelineModel, %s was given" % wrapped_model.__class__.__name__)

    if mining_function is not None and mining_function not in SUPPORTED_FUNCTION_NAMES:
        raise ValueError("mining_function should be one of %s, %s was given" % (
            SUPPORTED_FUNCTION_NAMES, mining_function))

    x_test = _ndarray_or_dataframe(x_test)
    y_test = _ndarray_or_dataframe(y_test)

    # get the source code of an input object
    object_name = None
    object_source = None
    if source_object is not None:
        try:
            import inspect
            object_name = source_object.__name__
            object_source = inspect.getsource(source_object)
        except:
            # Source may be unavailable (built-ins, REPL definitions) — skip.
            pass

    return {
        'runtime': wrapped_model.runtime(),
        'type': wrapped_model.model_type(),
        'appVersion': wrapped_model.model_version(),
        'functionName': mining_function if mining_function else wrapped_model.mining_function(y_test),
        'serialization': wrapped_model.serialization(),
        'algorithm': wrapped_model.algorithm(),
        'metrics': wrapped_model.evaluate_metrics(x_test, y_test, data_test, mining_function),
        'predictors': features_json if features_json else wrapped_model.predictors(x_test, data_test),
        'targets': labels_json if labels_json else wrapped_model.targets(y_test, data_test),
        'outputs': outputs_json if outputs_json else wrapped_model.outputs(y_test, data_test, x_test=x_test),
        'objectSource': object_source,
        'objectName': object_name
    }
def save_model(model, model_path, serialization=None):
    """Persist `model` to `model_path` using the given (or inferred) serialization.

    When serialization is None it is inferred via get_model_metadata.
    Returns the original, unmodified model object. Raises ValueError for an
    unsupported serialization name.
    """
    if serialization is None:
        metadata = get_model_metadata(model)
        serialization = metadata['serialization']

    if serialization not in SUPPORTED_SERIALIZATIONS:  # pragma: no cover
        raise ValueError("serialization should be one of %s, %s was given" % (
            SUPPORTED_SERIALIZATIONS, serialization))

    # Keep a reference so the caller gets back the object it passed in,
    # even though the 'pmml' branch rebinds the local `model`.
    raw_model = model
    if serialization == 'joblib':
        try:
            import joblib
        except ImportError:
            # Older scikit-learn vendored joblib.
            from sklearn.externals import joblib
        joblib.dump(model, model_path)
    elif serialization == 'pickle':
        import pickle
        with open(model_path, 'wb') as f:
            pickle.dump(model, f)
    elif serialization == 'xgboost':
        model.save_model(model_path)
    elif serialization == 'hdf5':
        model.save(model_path)
    elif serialization == 'pt':
        import torch
        # Only the state_dict is saved, matching the 'pt' load convention.
        torch.save(model.state_dict(), model_path)
    elif serialization == 'spark':
        from pyspark.ml import PipelineModel
        model.write().overwrite().save(model_path)
    elif serialization == 'pmml':
        # Accept a readable object, a file path, or raw PMML text/bytes.
        if hasattr(model, 'read') and callable(model.read):
            model = model.read()
        if os.path.exists(model):
            with open(model, mode='rb') as f:
                model = f.read()
        mode = 'wb' if isinstance(model, (bytes, bytearray)) else 'w'
        with open(model_path, mode) as file:
            file.write(model)
    elif serialization == 'onnx':
        import onnx
        # Accept an in-memory ModelProto, serialized bytes, or a file path.
        if isinstance(model, onnx.ModelProto):
            onnx_model = model
        elif isinstance(model, (bytes, bytearray)):
            onnx_model = onnx.load_model_from_string(model)
        else:
            onnx_model = onnx.load_model(model)
        onnx.save(onnx_model, model_path)
    elif serialization == 'lightgbm':
        model.save_model(model_path)
    return raw_model
| [
"onnx.save",
"onnx.load_model",
"torch.from_numpy",
"numpy.array",
"xgboost.DMatrix",
"os.path.exists",
"pypmml.Model.close",
"sklearn.base.is_classifier",
"numpy.asarray",
"pyspark.SparkConf",
"onnx.load_model_from_string",
"pandas.DataFrame",
"numpy.dtype",
"torch.randn",
"pypmml.Model... | [((18149, 18174), 'sklearn.base.is_classifier', 'is_classifier', (['self.model'], {}), '(self.model)\n', (18162, 18174), False, 'from sklearn.base import is_classifier, is_regressor\n'), ((18235, 18259), 'sklearn.base.is_regressor', 'is_regressor', (['self.model'], {}), '(self.model)\n', (18247, 18259), False, 'from sklearn.base import is_classifier, is_regressor\n'), ((38217, 38233), 'numpy.asarray', 'np.asarray', (['data'], {}), '(data)\n', (38227, 38233), True, 'import numpy as np\n'), ((41405, 41435), 'sklearn.externals.joblib.dump', 'joblib.dump', (['model', 'model_path'], {}), '(model, model_path)\n', (41416, 41435), False, 'from sklearn.externals import joblib\n'), ((2030, 2050), 'pandas.DataFrame', 'pd.DataFrame', (['x_test'], {}), '(x_test)\n', (2042, 2050), True, 'import pandas as pd\n'), ((3053, 3073), 'pandas.DataFrame', 'pd.DataFrame', (['y_test'], {}), '(y_test)\n', (3065, 3073), True, 'import pandas as pd\n'), ((5749, 5767), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (5761, 5767), True, 'import pandas as pd\n'), ((6364, 6386), 'pandas.DataFrame', 'pd.DataFrame', (['x_y_test'], {}), '(x_y_test)\n', (6376, 6386), True, 'import pandas as pd\n'), ((12935, 12971), 'onnx.checker.check_model', 'onnx.checker.check_model', (['onnx_model'], {}), '(onnx_model)\n', (12959, 12971), False, 'import onnx\n'), ((39926, 39958), 'inspect.getsource', 'inspect.getsource', (['source_object'], {}), '(source_object)\n', (39943, 39958), False, 'import inspect\n'), ((4726, 4756), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (4740, 4756), False, 'from sklearn.metrics import accuracy_score\n'), ((7916, 7929), 'pypmml.Model.close', 'Model.close', ([], {}), '()\n', (7927, 7929), False, 'from pypmml import Model\n'), ((8443, 8472), 'os.path.exists', 'os.path.exists', (['model_content'], {}), '(model_content)\n', (8457, 8472), False, 'import os\n'), ((8696, 8709), 'pypmml.Model.close', 
'Model.close', ([], {}), '()\n', (8707, 8709), False, 'from pypmml import Model\n'), ((12749, 12788), 'onnx.load_model_from_string', 'onnx.load_model_from_string', (['self.model'], {}), '(self.model)\n', (12776, 12788), False, 'import onnx\n'), ((12894, 12921), 'onnx.load_model', 'onnx.load_model', (['self.model'], {}), '(self.model)\n', (12909, 12921), False, 'import onnx\n'), ((15022, 15047), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (15031, 15047), True, 'import numpy as np\n'), ((15455, 15473), 'numpy.asarray', 'np.asarray', (['y_pred'], {}), '(y_pred)\n', (15465, 15473), True, 'import numpy as np\n'), ((20736, 20766), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (20750, 20766), False, 'from sklearn.metrics import accuracy_score\n'), ((23356, 23386), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (23370, 23386), False, 'from sklearn.metrics import accuracy_score\n'), ((28027, 28052), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (28036, 28052), True, 'import numpy as np\n'), ((28403, 28433), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (28417, 28433), False, 'from sklearn.metrics import accuracy_score\n'), ((32392, 32417), 'numpy.argmax', 'np.argmax', (['y_test'], {'axis': '(1)'}), '(y_test, axis=1)\n', (32401, 32417), True, 'import numpy as np\n'), ((32792, 32822), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (32806, 32822), False, 'from sklearn.metrics import accuracy_score\n'), ((34116, 34127), 'pyspark.SparkConf', 'SparkConf', ([], {}), '()\n', (34125, 34127), False, 'from pyspark import SparkConf, SparkContext\n'), ((41548, 41569), 'pickle.dump', 'pickle.dump', (['model', 'f'], {}), '(model, f)\n', (41559, 41569), False, 'import pickle\n'), ((5065, 5105), 
'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (5089, 5105), False, 'from sklearn.metrics import explained_variance_score\n'), ((8512, 8541), 'pypmml.Model.fromFile', 'Model.fromFile', (['model_content'], {}), '(model_content)\n', (8526, 8541), False, 'from pypmml import Model\n'), ((8602, 8633), 'pypmml.Model.fromString', 'Model.fromString', (['model_content'], {}), '(model_content)\n', (8618, 8633), False, 'from pypmml import Model\n'), ((10145, 10191), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred[prediction_col]'], {}), '(y_test, y_pred[prediction_col])\n', (10159, 10191), False, 'from sklearn.metrics import accuracy_score\n'), ((15592, 15617), 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), '(y_pred, axis=1)\n', (15601, 15617), True, 'import numpy as np\n'), ((15814, 15844), 'sklearn.metrics.accuracy_score', 'accuracy_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (15828, 15844), False, 'from sklearn.metrics import accuracy_score\n'), ((21096, 21136), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (21120, 21136), False, 'from sklearn.metrics import explained_variance_score\n'), ((23703, 23743), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (23727, 23743), False, 'from sklearn.metrics import explained_variance_score\n'), ((28750, 28790), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (28774, 28790), False, 'from sklearn.metrics import explained_variance_score\n'), ((32564, 32579), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (32576, 32579), False, 'import torch\n'), ((33192, 33210), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (33204, 33210), True, 'import pandas as pd\n'), ((33248, 
33288), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (33272, 33288), False, 'from sklearn.metrics import explained_variance_score\n'), ((10527, 10583), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred[prediction_col]'], {}), '(y_test, y_pred[prediction_col])\n', (10551, 10583), False, 'from sklearn.metrics import explained_variance_score\n'), ((16119, 16159), 'sklearn.metrics.explained_variance_score', 'explained_variance_score', (['y_test', 'y_pred'], {}), '(y_test, y_pred)\n', (16143, 16159), False, 'from sklearn.metrics import explained_variance_score\n'), ((25937, 25969), 'numpy.dtype', 'np.dtype', (['x.dtype.as_numpy_dtype'], {}), '(x.dtype.as_numpy_dtype)\n', (25945, 25969), True, 'import numpy as np\n'), ((27303, 27335), 'numpy.dtype', 'np.dtype', (['x.dtype.as_numpy_dtype'], {}), '(x.dtype.as_numpy_dtype)\n', (27311, 27335), True, 'import numpy as np\n'), ((30239, 30254), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (30251, 30254), False, 'import torch\n'), ((32696, 32714), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {}), '(data)\n', (32708, 32714), True, 'import pandas as pd\n'), ((33060, 33075), 'torch.Tensor', 'torch.Tensor', (['(1)'], {}), '(1)\n', (33072, 33075), False, 'import torch\n'), ((20600, 20613), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (20608, 20613), True, 'import numpy as np\n'), ((21037, 21056), 'xgboost.DMatrix', 'xgb.DMatrix', (['x_test'], {}), '(x_test)\n', (21048, 21056), True, 'import xgboost as xgb\n'), ((23220, 23233), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (23228, 23233), True, 'import numpy as np\n'), ((28267, 28280), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (28275, 28280), True, 'import numpy as np\n'), ((32741, 32754), 'numpy.array', 'np.array', (['[x]'], {}), '([x])\n', (32749, 32754), True, 'import numpy as np\n'), ((20552, 20571), 'xgboost.DMatrix', 
'xgb.DMatrix', (['x_test'], {}), '(x_test)\n', (20563, 20571), True, 'import xgboost as xgb\n'), ((31518, 31537), 'torch.randn', 'torch.randn', (['*shape'], {}), '(*shape)\n', (31529, 31537), False, 'import torch\n'), ((42082, 42103), 'os.path.exists', 'os.path.exists', (['model'], {}), '(model)\n', (42096, 42103), False, 'import os\n'), ((42643, 42676), 'onnx.save', 'onnx.save', (['onnx_model', 'model_path'], {}), '(onnx_model, model_path)\n', (42652, 42676), False, 'import onnx\n'), ((32620, 32644), 'torch.from_numpy', 'torch.from_numpy', (['x_test'], {}), '(x_test)\n', (32636, 32644), False, 'import torch\n'), ((33116, 33140), 'torch.from_numpy', 'torch.from_numpy', (['x_test'], {}), '(x_test)\n', (33132, 33140), False, 'import torch\n'), ((42538, 42572), 'onnx.load_model_from_string', 'onnx.load_model_from_string', (['model'], {}), '(model)\n', (42565, 42572), False, 'import onnx\n'), ((42612, 42634), 'onnx.load_model', 'onnx.load_model', (['model'], {}), '(model)\n', (42627, 42634), False, 'import onnx\n')] |
#!/usr/bin/env python
"""
TODO:
# Author:
# Created Time :
# File Name:
# Description:
"""
import numpy as np
import scipy.sparse as sp
import random
import inspect
try:
import tensorflow as tf
except ImportError:
raise ImportError('DeepLinc requires TensorFlow. Please follow instructions'
' at https://www.tensorflow.org/install/ to install'
' it.')
# =============== Data processing ===============
# ===============================================
def sparse2tuple(sparse_mx):
    """Decompose a scipy sparse matrix into a (coords, values, shape) triple.

    coords is an (nnz, 2) array of [row, col] indices, values holds the
    matching non-zero entries, and shape is the dense shape — the layout
    expected by TF sparse-placeholder feeds.
    """
    coo = sparse_mx if sp.isspmatrix_coo(sparse_mx) else sparse_mx.tocoo()
    coords = np.vstack((coo.row, coo.col)).transpose()
    return coords, coo.data, coo.shape
def preprocess_graph(adj):
    """Symmetrically normalize an adjacency matrix with added self-loops.

    Computes D^{-1/2} (A + I) D^{-1/2}, where D is the degree matrix of
    A + I, and returns the result in COO format.
    """
    adj = sp.coo_matrix(adj)
    adj_self = adj + sp.eye(adj.shape[0])
    degrees = np.array(adj_self.sum(1))
    inv_sqrt_deg = sp.diags(np.power(degrees, -0.5).flatten())
    scaled = adj_self.dot(inv_sqrt_deg)
    return scaled.transpose().dot(inv_sqrt_deg).tocoo()
def ismember(tmp1, tmp2, tol=5):
    """Return True when row ``tmp1`` occurs among the rows of ``tmp2``.

    Args:
        tmp1: a single row (array-like) to look for.
        tmp2: 2-D array whose rows are the candidate matches.
        tol: number of decimals used when comparing entries.

    Returns:
        bool: True iff some row of tmp2 equals tmp1 after rounding the
        difference to `tol` decimals.

    Fixes vs the previous version: the result is computed once (the old
    if/elif evaluated np.any twice and structurally allowed a None
    fall-through) and is returned as a plain bool.
    """
    rows_close = np.all(np.round(tmp1 - tmp2[:, None], tol) == 0, axis=-1)
    return bool(np.any(rows_close))
def retrieve_name(var):
    """Return the variable name(s) bound to ``var`` in the *caller's* scope.

    Uses frame introspection and identity comparison (``is``), so the result
    depends entirely on how the direct caller named the object. Fragile by
    design: it only works when invoked from the scope that owns the variable
    (packed_data relies on this to build its result keys).
    """
    callers_local_vars = list(inspect.currentframe().f_back.f_locals.items())
    return [var_name for var_name, var_val in callers_local_vars if var_val is var]
def set_placeholder(adj, latent_dim):
    """Create the TF1 placeholders consumed by the model and optimizer.

    Args:
        adj: adjacency matrix; only its first dimension (node count) is used.
        latent_dim: size of the latent embedding per node.

    Returns:
        dict with sparse placeholders for 'features', 'adj' and 'adj_orig',
        a 'dropout' placeholder defaulting to 0, and the dense
        'real_distribution' placeholder for the adversarial prior samples.
    """
    n_nodes = adj.shape[0]
    placeholders = {}
    placeholders['features'] = tf.sparse_placeholder(tf.float32)
    placeholders['adj'] = tf.sparse_placeholder(tf.float32)
    placeholders['adj_orig'] = tf.sparse_placeholder(tf.float32)
    placeholders['dropout'] = tf.placeholder_with_default(0., shape=())
    placeholders['real_distribution'] = tf.placeholder(
        dtype=tf.float32, shape=[n_nodes, latent_dim], name='real_distribution')
    return placeholders
def construct_feed_dict(adj_normalized, adj, features, placeholders):
    """Map the model placeholders to one training step's concrete inputs.

    Note: the value fed to 'adj_orig' is the adjacency passed in as ``adj``,
    which at call time is the training-set label adjacency (adj_label) rather
    than the full original graph — see the caller in ``update``.
    """
    return {
        placeholders['features']: features,
        placeholders['adj']: adj_normalized,
        placeholders['adj_orig']: adj,
    }
# def unified_data_format(exp_values, adj_values):
# exp_values = sp.csr_matrix(exp_values)
# adj_values = sp.csr_matrix(adj_values)
# =============== Training and testing set splitting ===============
# ==================================================================
def sampling_test_edges_neg(n, test_edges, edges_double):
    """Sample as many negative (absent) edges as there are test edges.

    Repeatedly draws node pairs uniformly at random and keeps a pair only if
    it is not a self-loop, appears in neither direction among the real edges,
    and was not already sampled (in either direction).
    """
    negatives = []
    target = len(test_edges)
    while len(negatives) < target:
        u = np.random.randint(0, n)
        v = np.random.randint(0, n)
        if u == v:
            continue
        if ismember([u, v], edges_double):
            continue
        if ismember([v, u], edges_double):
            continue
        if negatives:
            if ismember([v, u], np.array(negatives)):
                continue
            if ismember([u, v], np.array(negatives)):
                continue
        negatives.append([u, v])
    return negatives
def train_test_split(adj_values, test_ratio=0.1):
    """Split an undirected adjacency matrix into train/test edge sets.

    Args:
        adj_values: symmetric scipy sparse adjacency matrix.
        test_ratio: fraction of (single-direction) edges held out for testing;
            a value > 1 is interpreted as an absolute edge count.

    Returns:
        (adj_train, adj_test, train_edges, test_edges, test_edges_false) —
        the symmetric train/test adjacency matrices, the held-out and kept
        edges in single direction, and sampled negative test edges.
    """
    # Get the id of all edges
    edges_single = sparse2tuple(sp.triu(adj_values))[0]  # single direction of edges
    edges_double = sparse2tuple(adj_values)[0]  # double direction of edges
    if test_ratio > 1:
        # Absolute count requested: convert to a fraction.
        test_ratio = test_ratio/edges_single.shape[0]

    # Split into train and test sets
    num_test = int(np.floor(edges_single.shape[0] * test_ratio))
    all_edges_idx = list(range(edges_single.shape[0]))
    np.random.shuffle(all_edges_idx)
    test_edges_idx = all_edges_idx[:num_test]
    test_edges = edges_single[test_edges_idx]
    # Require enough absent node pairs to sample negatives from.
    # NOTE(review): ImportError is a misnomer here (ValueError would fit),
    # but changing the type would break callers that catch it.
    if (adj_values.shape[0]**2-adj_values.sum()-adj_values.shape[0])/2 < 2*len(test_edges):
        raise ImportError('The network is too dense, please reduce the proportion of test set or delete some edges in the network.')
    else:
        test_edges_false = sampling_test_edges_neg(adj_values.shape[0], test_edges, edges_double)
    train_edges = np.delete(edges_single, test_edges_idx, axis=0)

    # Mark the train and test sets in the adj matrix
    data = np.ones(train_edges.shape[0])
    adj_train = sp.csr_matrix((data, (train_edges[:, 0], train_edges[:, 1])), shape=adj_values.shape)
    adj_train = adj_train + adj_train.T
    data = np.ones(test_edges.shape[0])
    adj_test = sp.csr_matrix((data, (test_edges[:, 0], test_edges[:, 1])), shape=adj_values.shape)
    adj_test = adj_test + adj_test.T

    return adj_train, adj_test, train_edges, test_edges, test_edges_false  # return single direction of edges
def packed_data(exp_values, adj_values, test_ratio=0.1):
    """Bundle expression data and graph into the feature dict used for training.

    Splits the graph into train/test edges, normalizes the training adjacency
    and packages everything into a dict. The dict keys are derived from the
    *local variable names* below via retrieve_name — renaming any of these
    locals would silently change the output keys, so do not rename them.
    """
    exp_values = sp.csr_matrix(exp_values)
    adj_values = sp.csr_matrix(adj_values)
    adj_train, adj_test, train_edges, test_edges, test_edges_false = train_test_split(adj_values, test_ratio)
    adj_norm = sparse2tuple(preprocess_graph(adj_train))
    num_nodes = adj_train.shape[0]
    features = sparse2tuple(exp_values.tocoo())
    num_features = features[2][1]
    features_nonzero = features[1].shape[0]
    # Re-weighting terms for the reconstruction loss on a sparse graph.
    pos_weight = float(adj_train.shape[0]**2-adj_train.sum())/adj_train.sum()
    norm = adj_train.shape[0]**2/float((adj_train.shape[0]*adj_train.shape[0]-adj_train.sum())*2)
    # Reconstruction labels: training adjacency plus self-loops.
    adj_label = adj_train + sp.eye(adj_train.shape[0])
    adj_label = sparse2tuple(adj_label)
    items = [adj_train, num_features, num_nodes, features_nonzero, pos_weight,
             norm, adj_norm, adj_label, features, train_edges, test_edges, test_edges_false]
    feas = {}
    for item in items:
        # Key each value by its local variable name (see retrieve_name).
        feas[retrieve_name(item).pop()] = item
    return feas
# =============== Building optimizer ===============
# ==================================================
class OptimizerVAE(object):
    """TF1 optimizer wiring for the adversarial variational graph autoencoder.

    Builds the discriminator/generator cross-entropy losses, the weighted
    reconstruction loss with KL regularization, and the three Adam training
    ops (reconstruction, discriminator, generator).
    """
    def __init__(self, preds, labels, model, num_nodes, pos_weight, norm, d_real, d_fake, learning_rate_1, learning_rate_2):
        preds_sub = preds
        labels_sub = labels

        # Discrimminator Loss: real prior samples -> 1, encoder outputs -> 0.
        dc_loss_real = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_real), logits=d_real))
        dc_loss_fake = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.zeros_like(d_fake), logits=d_fake))
        self.dc_loss = dc_loss_fake + dc_loss_real

        # Generator loss: encoder tries to make the discriminator output 1.
        self.generator_loss = tf.reduce_mean(
            tf.nn.sigmoid_cross_entropy_with_logits(labels=tf.ones_like(d_fake), logits=d_fake))

        # Weighted reconstruction loss (pos_weight compensates graph sparsity).
        self.cost = norm * tf.reduce_mean(
            tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_1)  # Adam Optimizer

        # Partition trainable variables by name prefix: 'dc_' = discriminator,
        # 'e_' = encoder/generator.
        all_variables = tf.trainable_variables()
        dc_var = [var for var in all_variables if 'dc_' in var.op.name]
        en_var = [var for var in all_variables if 'e_' in var.op.name]

        with tf.variable_scope(tf.get_variable_scope(), reuse=False):
            self.discriminator_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_2,
                                                                   beta1=0.9, name='adam1').minimize(self.dc_loss, var_list=dc_var)
            self.generator_optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_2,
                                                               beta1=0.9, name='adam2').minimize(self.generator_loss,
                                                                                                 var_list=en_var)

        # NOTE(review): cost and optimizer are rebuilt here, shadowing the
        # identical assignments above — redundant but kept as in the original.
        self.cost = norm * tf.reduce_mean(tf.nn.weighted_cross_entropy_with_logits(logits=preds_sub, targets=labels_sub, pos_weight=pos_weight))
        self.optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate_1)  # Adam Optimizer

        # Latent loss: KL divergence between the variational posterior and N(0, I);
        # subtracting it from cost yields the (negative) ELBO to minimize.
        self.log_lik = self.cost
        self.kl = (0.5 / num_nodes) * tf.reduce_mean(tf.reduce_sum(1 + 2 * model.z_log_std - tf.square(model.z_mean) -
                                                                    tf.square(tf.exp(model.z_log_std)), 1))
        self.cost -= self.kl

        self.opt_op = self.optimizer.minimize(self.cost)
        self.grads_vars = self.optimizer.compute_gradients(self.cost)
def set_optimizer(model, discriminator, placeholders, pos_weight, norm, num_nodes, lr, dc_lr):
    """Assemble the OptimizerVAE for a model/discriminator pair.

    The discriminator is applied first to the prior samples (creating its
    variables) and then, with reuse=True, to the encoder embeddings — this
    construction order matters for TF1 variable scoping.
    """
    opt = OptimizerVAE(preds = model.reconstructions,
                       # Dense 0/1 labels flattened from the sparse adjacency feed.
                       labels = tf.reshape(tf.sparse_tensor_to_dense(placeholders['adj_orig'],
                                                                     validate_indices=False), [-1]),
                       model = model,
                       num_nodes = num_nodes,
                       pos_weight = pos_weight,
                       norm = norm,
                       d_real = discriminator.construct(placeholders['real_distribution']),
                       d_fake = discriminator.construct(model.embeddings, reuse=True),
                       learning_rate_1 = lr,
                       learning_rate_2 = dc_lr)
    return opt
# =============== Building model updating function ===============
# ================================================================
def update(model, opt, sess, adj_norm, adj_label, features, placeholders, adj, dropout_rate, latent_dim):
    """Run one training round: 20 reconstruction steps, then one D and one G step.

    Returns (hidden-layer embeddings, latent embeddings, last reconstruction loss).
    """
    # Construct feed dictionary
    feed_dict = construct_feed_dict(adj_norm, adj_label, features, placeholders)
    feed_dict.update({placeholders['dropout']: dropout_rate})
    # NOTE(review): the line below immediately overwrites dropout with 0, so
    # the dropout_rate argument is effectively ignored — confirm intent.
    feed_dict.update({placeholders['dropout']: 0})
    emb_hidden1 = sess.run(model.h1, feed_dict=feed_dict)
    emb_hidden2 = sess.run(model.embeddings, feed_dict=feed_dict)
    # Samples from the prior for the adversarial regularizer.
    z_real_dist = np.random.randn(adj.shape[0], latent_dim)
    feed_dict.update({placeholders['real_distribution']: z_real_dist})
    for j in range(20):
        _, reconstruct_loss = sess.run([opt.opt_op, opt.cost], feed_dict=feed_dict)
    d_loss, _ = sess.run([opt.dc_loss, opt.discriminator_optimizer], feed_dict=feed_dict)
    g_loss, _ = sess.run([opt.generator_loss, opt.generator_optimizer], feed_dict=feed_dict)
    avg_cost = reconstruct_loss
    return emb_hidden1, emb_hidden2, avg_cost
# =============== Others ===============
# ======================================
def ranked_partial(adj_orig, adj_rec, coord, size):
    """Rank spatial subregions by how much the reconstructed adjacency differs
    from the original one.

    Args:
        adj_orig: (n, n) original adjacency matrix.
        adj_rec: (n, n) reconstructed adjacency matrix.
        coord: (n, 2) spatial coordinates of the n cells.
        size: [nx, ny] — split the bounding box into nx columns and ny rows.

    Returns:
        (ranked, subregion_mark): ranked is a list of
        (difference_ratio, cell_id_list) pairs sorted by ratio descending;
        subregion_mark lists the [x_interval, y_interval] of each processed
        (non-empty) subregion in scan order.

    Fixes vs the previous version:
      * an empty subregion no longer aborts the remaining subregions of the
        same column (continue instead of break);
      * subregions with identical difference ratios are all kept (the old
        dict keyed by the ratio silently dropped duplicates).
    """
    x_min, x_max = coord[:, 0].min(), coord[:, 0].max()
    y_min, y_max = coord[:, 1].min(), coord[:, 1].max()
    x_gap = (x_max - x_min) / size[0]
    y_gap = (y_max - y_min) / size[1]

    x_point = np.arange(x_min, x_max, x_gap).tolist()
    if x_max not in x_point:
        x_point += [x_max]
    y_point = np.arange(y_min, y_max, y_gap).tolist()
    if y_max not in y_point:
        y_point += [y_max]
    x_interval = [[x_point[i], x_point[i + 1]] for i in range(len(x_point) - 1)]
    y_interval = [[y_point[i], y_point[i + 1]] for i in range(len(y_point) - 1)]

    ranked = []
    subregion_mark = []
    for xi in x_interval:
        for yj in y_interval:
            # Intervals are half-open, so points lying exactly on the global
            # max x/y coordinate fall outside every subregion (as before).
            ids = np.where((coord[:, 0] >= xi[0]) & (coord[:, 0] < xi[1]) &
                           (coord[:, 1] >= yj[0]) & (coord[:, 1] < yj[1]))[0].tolist()
            if not ids:
                continue  # empty subregion: skip it, keep scanning
            sub_orig = adj_orig[ids, :][:, ids]
            sub_rec = adj_rec[ids, :][:, ids]
            # Fraction of adjacency entries that changed within the subregion.
            diff = np.count_nonzero(sub_orig - sub_rec) / (sub_orig.shape[0] * sub_orig.shape[1])
            ranked.append((diff, ids))
            subregion_mark.append([xi, yj])
    return sorted(ranked, key=lambda item: item[0], reverse=True), subregion_mark
| [
"tensorflow.sparse_placeholder",
"tensorflow.get_variable_scope",
"numpy.array",
"tensorflow.ones_like",
"scipy.sparse.isspmatrix_coo",
"tensorflow.nn.weighted_cross_entropy_with_logits",
"scipy.sparse.eye",
"numpy.where",
"numpy.delete",
"tensorflow.placeholder",
"numpy.vstack",
"scipy.sparse... | [((818, 836), 'scipy.sparse.coo_matrix', 'sp.coo_matrix', (['adj'], {}), '(adj)\n', (831, 836), True, 'import scipy.sparse as sp\n'), ((4000, 4032), 'numpy.random.shuffle', 'np.random.shuffle', (['all_edges_idx'], {}), '(all_edges_idx)\n', (4017, 4032), True, 'import numpy as np\n'), ((4476, 4523), 'numpy.delete', 'np.delete', (['edges_single', 'test_edges_idx'], {'axis': '(0)'}), '(edges_single, test_edges_idx, axis=0)\n', (4485, 4523), True, 'import numpy as np\n'), ((4589, 4618), 'numpy.ones', 'np.ones', (['train_edges.shape[0]'], {}), '(train_edges.shape[0])\n', (4596, 4618), True, 'import numpy as np\n'), ((4635, 4725), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(data, (train_edges[:, 0], train_edges[:, 1]))'], {'shape': 'adj_values.shape'}), '((data, (train_edges[:, 0], train_edges[:, 1])), shape=\n adj_values.shape)\n', (4648, 4725), True, 'import scipy.sparse as sp\n'), ((4772, 4800), 'numpy.ones', 'np.ones', (['test_edges.shape[0]'], {}), '(test_edges.shape[0])\n', (4779, 4800), True, 'import numpy as np\n'), ((4816, 4904), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['(data, (test_edges[:, 0], test_edges[:, 1]))'], {'shape': 'adj_values.shape'}), '((data, (test_edges[:, 0], test_edges[:, 1])), shape=\n adj_values.shape)\n', (4829, 4904), True, 'import scipy.sparse as sp\n'), ((5123, 5148), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['exp_values'], {}), '(exp_values)\n', (5136, 5148), True, 'import scipy.sparse as sp\n'), ((5166, 5191), 'scipy.sparse.csr_matrix', 'sp.csr_matrix', (['adj_values'], {}), '(adj_values)\n', (5179, 5191), True, 'import scipy.sparse as sp\n'), ((10150, 10191), 'numpy.random.randn', 'np.random.randn', (['adj.shape[0]', 'latent_dim'], {}), '(adj.shape[0], latent_dim)\n', (10165, 10191), True, 'import numpy as np\n'), ((555, 583), 'scipy.sparse.isspmatrix_coo', 'sp.isspmatrix_coo', (['sparse_mx'], {}), '(sparse_mx)\n', (572, 583), True, 'import scipy.sparse as sp\n'), ((854, 874), 'scipy.sparse.eye', 
'sp.eye', (['adj.shape[0]'], {}), '(adj.shape[0])\n', (860, 874), True, 'import scipy.sparse as sp\n'), ((1713, 1746), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1734, 1746), True, 'import tensorflow as tf\n'), ((1763, 1796), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1784, 1796), True, 'import tensorflow as tf\n'), ((1818, 1851), 'tensorflow.sparse_placeholder', 'tf.sparse_placeholder', (['tf.float32'], {}), '(tf.float32)\n', (1839, 1851), True, 'import tensorflow as tf\n'), ((1872, 1914), 'tensorflow.placeholder_with_default', 'tf.placeholder_with_default', (['(0.0)'], {'shape': '()'}), '(0.0, shape=())\n', (1899, 1914), True, 'import tensorflow as tf\n'), ((1944, 2041), 'tensorflow.placeholder', 'tf.placeholder', ([], {'dtype': 'tf.float32', 'shape': '[adj.shape[0], latent_dim]', 'name': '"""real_distribution"""'}), "(dtype=tf.float32, shape=[adj.shape[0], latent_dim], name=\n 'real_distribution')\n", (1958, 2041), True, 'import tensorflow as tf\n'), ((2970, 2993), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n'], {}), '(0, n)\n', (2987, 2993), True, 'import numpy as np\n'), ((3010, 3033), 'numpy.random.randint', 'np.random.randint', (['(0)', 'n'], {}), '(0, n)\n', (3027, 3033), True, 'import numpy as np\n'), ((3895, 3939), 'numpy.floor', 'np.floor', (['(edges_single.shape[0] * test_ratio)'], {}), '(edges_single.shape[0] * test_ratio)\n', (3903, 3939), True, 'import numpy as np\n'), ((5730, 5756), 'scipy.sparse.eye', 'sp.eye', (['adj_train.shape[0]'], {}), '(adj_train.shape[0])\n', (5736, 5756), True, 'import scipy.sparse as sp\n'), ((7105, 7158), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate_1'}), '(learning_rate=learning_rate_1)\n', (7127, 7158), True, 'import tensorflow as tf\n'), ((7202, 7226), 'tensorflow.trainable_variables', 'tf.trainable_variables', ([], {}), '()\n', (7224, 7226), 
True, 'import tensorflow as tf\n'), ((8205, 8258), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate_1'}), '(learning_rate=learning_rate_1)\n', (8227, 8258), True, 'import tensorflow as tf\n'), ((636, 677), 'numpy.vstack', 'np.vstack', (['(sparse_mx.row, sparse_mx.col)'], {}), '((sparse_mx.row, sparse_mx.col))\n', (645, 677), True, 'import numpy as np\n'), ((1236, 1271), 'numpy.round', 'np.round', (['(tmp1 - tmp2[:, None])', 'tol'], {}), '(tmp1 - tmp2[:, None], tol)\n', (1244, 1271), True, 'import numpy as np\n'), ((3633, 3652), 'scipy.sparse.triu', 'sp.triu', (['adj_values'], {}), '(adj_values)\n', (3640, 3652), True, 'import scipy.sparse as sp\n'), ((945, 967), 'numpy.power', 'np.power', (['rowsum', '(-0.5)'], {}), '(rowsum, -0.5)\n', (953, 967), True, 'import numpy as np\n'), ((1302, 1329), 'numpy.any', 'np.any', (['rows_close'], {'axis': '(-1)'}), '(rows_close, axis=-1)\n', (1308, 1329), True, 'import numpy as np\n'), ((3295, 3321), 'numpy.array', 'np.array', (['test_edges_false'], {}), '(test_edges_false)\n', (3303, 3321), True, 'import numpy as np\n'), ((3389, 3415), 'numpy.array', 'np.array', (['test_edges_false'], {}), '(test_edges_false)\n', (3397, 3415), True, 'import numpy as np\n'), ((6977, 7083), 'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', ([], {'logits': 'preds_sub', 'targets': 'labels_sub', 'pos_weight': 'pos_weight'}), '(logits=preds_sub, targets=\n labels_sub, pos_weight=pos_weight)\n', (7017, 7083), True, 'import tensorflow as tf\n'), ((7403, 7426), 'tensorflow.get_variable_scope', 'tf.get_variable_scope', ([], {}), '()\n', (7424, 7426), True, 'import tensorflow as tf\n'), ((8077, 8183), 'tensorflow.nn.weighted_cross_entropy_with_logits', 'tf.nn.weighted_cross_entropy_with_logits', ([], {'logits': 'preds_sub', 'targets': 'labels_sub', 'pos_weight': 'pos_weight'}), '(logits=preds_sub, targets=\n labels_sub, pos_weight=pos_weight)\n', (8117, 8183), 
True, 'import tensorflow as tf\n'), ((8911, 8986), 'tensorflow.sparse_tensor_to_dense', 'tf.sparse_tensor_to_dense', (["placeholders['adj_orig']"], {'validate_indices': '(False)'}), "(placeholders['adj_orig'], validate_indices=False)\n", (8936, 8986), True, 'import tensorflow as tf\n'), ((1381, 1408), 'numpy.any', 'np.any', (['rows_close'], {'axis': '(-1)'}), '(rows_close, axis=-1)\n', (1387, 1408), True, 'import numpy as np\n'), ((6526, 6546), 'tensorflow.ones_like', 'tf.ones_like', (['d_real'], {}), '(d_real)\n', (6538, 6546), True, 'import tensorflow as tf\n'), ((6662, 6683), 'tensorflow.zeros_like', 'tf.zeros_like', (['d_fake'], {}), '(d_fake)\n', (6675, 6683), True, 'import tensorflow as tf\n'), ((6883, 6903), 'tensorflow.ones_like', 'tf.ones_like', (['d_fake'], {}), '(d_fake)\n', (6895, 6903), True, 'import tensorflow as tf\n'), ((7485, 7563), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate_2', 'beta1': '(0.9)', 'name': '"""adam1"""'}), "(learning_rate=learning_rate_2, beta1=0.9, name='adam1')\n", (7507, 7563), True, 'import tensorflow as tf\n'), ((7750, 7828), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': 'learning_rate_2', 'beta1': '(0.9)', 'name': '"""adam2"""'}), "(learning_rate=learning_rate_2, beta1=0.9, name='adam2')\n", (7772, 7828), True, 'import tensorflow as tf\n'), ((1496, 1518), 'inspect.currentframe', 'inspect.currentframe', ([], {}), '()\n', (1516, 1518), False, 'import inspect\n'), ((11550, 11656), 'numpy.where', 'np.where', (['((coord[:, 0] >= i[0]) & (coord[:, 0] < i[1]) & (coord[:, 1] >= j[0]) & (\n coord[:, 1] < j[1]))'], {}), '((coord[:, 0] >= i[0]) & (coord[:, 0] < i[1]) & (coord[:, 1] >= j[0\n ]) & (coord[:, 1] < j[1]))\n', (11558, 11656), True, 'import numpy as np\n'), ((8426, 8449), 'tensorflow.square', 'tf.square', (['model.z_mean'], {}), '(model.z_mean)\n', (8435, 8449), True, 'import tensorflow as tf\n'), ((8529, 8552), 'tensorflow.exp', 
'tf.exp', (['model.z_log_std'], {}), '(model.z_log_std)\n', (8535, 8552), True, 'import tensorflow as tf\n'), ((11906, 11947), 'numpy.where', 'np.where', (['(adj_orig_tmp - adj_rec_tmp != 0)'], {}), '(adj_orig_tmp - adj_rec_tmp != 0)\n', (11914, 11947), True, 'import numpy as np\n')] |
"""
Module that contains many useful utilities
for validating data or function arguments
"""
from typing import Iterable, Union
import warnings
import numpy as np
from my_happy_pandas.core.dtypes.common import is_bool
def _check_arg_length(fname, args, max_fname_arg_count, compat_args):
"""
Checks whether 'args' has length of at most 'compat_args'. Raises
a TypeError if that is not the case, similar to in Python when a
function is called with too many arguments.
"""
if max_fname_arg_count < 0:
raise ValueError("'max_fname_arg_count' must be non-negative")
if len(args) > len(compat_args):
max_arg_count = len(compat_args) + max_fname_arg_count
actual_arg_count = len(args) + max_fname_arg_count
argument = "argument" if max_arg_count == 1 else "arguments"
raise TypeError(
f"{fname}() takes at most {max_arg_count} {argument} "
f"({actual_arg_count} given)"
)
def _check_for_default_values(fname, arg_val_dict, compat_args):
"""
Check that the keys in `arg_val_dict` are mapped to their
default values as specified in `compat_args`.
Note that this function is to be called only when it has been
checked that arg_val_dict.keys() is a subset of compat_args
"""
for key in arg_val_dict:
# try checking equality directly with '=' operator,
# as comparison may have been overridden for the left
# hand object
try:
v1 = arg_val_dict[key]
v2 = compat_args[key]
# check for None-ness otherwise we could end up
# comparing a numpy array vs None
if (v1 is not None and v2 is None) or (v1 is None and v2 is not None):
match = False
else:
match = v1 == v2
if not is_bool(match):
raise ValueError("'match' is not a boolean")
# could not compare them directly, so try comparison
# using the 'is' operator
except ValueError:
match = arg_val_dict[key] is compat_args[key]
if not match:
raise ValueError(
f"the '{key}' parameter is not supported in "
f"the pandas implementation of {fname}()"
)
def validate_args(fname, args, max_fname_arg_count, compat_args):
    """
    Validate the ``*args`` tuple passed to a pandas function.

    Two conditions are enforced: ``args`` may hold at most
    ``len(compat_args)`` values, and each supplied value must equal the
    default recorded for the corresponding key in ``compat_args``.

    Parameters
    ----------
    fname : str
        Name of the function receiving ``args`` (used in error messages).
    args : tuple
        Positional arguments to validate.
    max_fname_arg_count : int
        Number of arguments ``fname`` itself accepts outside of ``args``;
        must be non-negative. Used only to build error messages.
    compat_args : dict
        Mapping of allowed argument names to their default values. A dict
        (rather than a set) preserves the positional ordering that buggy
        numpy versions relied on when forwarding keyword arguments
        positionally.

    Raises
    ------
    TypeError
        If ``args`` holds more values than ``compat_args`` has keys.
    ValueError
        If any supplied value differs from its recorded default.
    """
    _check_arg_length(fname, args, max_fname_arg_count, compat_args)
    # Map positional values onto their argument names so the default-value
    # check can report which unsupported parameter was set.
    _check_for_default_values(fname, dict(zip(compat_args, args)), compat_args)
def _check_for_invalid_keys(fname, kwargs, compat_args):
"""
Checks whether 'kwargs' contains any keys that are not
in 'compat_args' and raises a TypeError if there is one.
"""
# set(dict) --> set of the dictionary's keys
diff = set(kwargs) - set(compat_args)
if diff:
bad_arg = list(diff)[0]
raise TypeError(f"{fname}() got an unexpected keyword argument '{bad_arg}'")
def validate_kwargs(fname, kwargs, compat_args):
    """
    Validate the ``**kwargs`` dict passed to a pandas function.

    Every key must appear in ``compat_args`` and every value must equal
    the default recorded there.

    Parameters
    ----------
    fname : str
        Name of the function receiving ``kwargs`` (used in error messages).
    kwargs : dict
        Keyword arguments to validate.
    compat_args : dict
        Mapping of allowed keyword names to their default values.

    Raises
    ------
    TypeError
        If ``kwargs`` contains a key absent from ``compat_args``.
    ValueError
        If any value differs from its recorded default.
    """
    _check_for_invalid_keys(fname, kwargs, compat_args)
    # Run the default-value check on a copy so it cannot touch the
    # caller's dict.
    _check_for_default_values(fname, kwargs.copy(), compat_args)
def validate_args_and_kwargs(fname, args, kwargs, max_fname_arg_count, compat_args):
    """
    Validate the combined ``*args``/``**kwargs`` passed to a pandas function.

    All supplied values, positional or keyword, must correspond to keys of
    ``compat_args`` and must equal the defaults recorded there.

    Parameters
    ----------
    fname : str
        Name of the function being validated (used in error messages).
    args : tuple
        Positional arguments to validate.
    kwargs : dict
        Keyword arguments to validate. Updated in place with the named
        positional arguments before the final check.
    max_fname_arg_count : int
        Number of arguments ``fname`` itself accepts outside of ``args``;
        used only for error messages. Must be non-negative.
    compat_args : dict
        Mapping of allowed argument names to their default values, in
        positional order.

    Raises
    ------
    TypeError
        If more values are supplied than ``compat_args`` allows, or if a
        name is given both positionally and by keyword.
    ValueError
        If any supplied value differs from its recorded default.

    See Also
    --------
    validate_args : Purely args validation.
    validate_kwargs : Purely kwargs validation.
    """
    # The total number of supplied values (positional + keyword) may not
    # exceed the number of supported arguments.
    _check_arg_length(
        fname, args + tuple(kwargs.values()), max_fname_arg_count, compat_args
    )

    # Assign names to positional values in compat_args order; supplying the
    # same name both ways mirrors Python's own TypeError.
    positional = dict(zip(compat_args, args))
    for key in positional:
        if key in kwargs:
            raise TypeError(
                f"{fname}() got multiple values for keyword argument '{key}'"
            )

    kwargs.update(positional)
    validate_kwargs(fname, kwargs, compat_args)
def validate_bool_kwarg(value, arg_name):
    """Return ``value`` unchanged after checking it is a bool or None."""
    if value is None or is_bool(value):
        return value
    raise ValueError(
        f'For argument "{arg_name}" expected type bool, received '
        f"type {type(value).__name__}."
    )
def validate_axis_style_args(data, args, kwargs, arg_name, method_name):
    """
    Argument handler for mixed index, columns / axis functions

    In an attempt to handle both `.method(index, columns)`, and
    `.method(arg, axis=.)`, we have to do some bad things to argument
    parsing. This translates all arguments to `{index=., columns=.}` style.

    Parameters
    ----------
    data : DataFrame
    args : tuple
        All positional arguments from the user
    kwargs : dict
        All keyword arguments from the user
    arg_name, method_name : str
        Used for better error messages

    Returns
    -------
    kwargs : dict
        A dictionary of keyword arguments. Doesn't modify ``kwargs``
        inplace, so update them with the return value here.

    Examples
    --------
    >>> df._validate_axis_style_args((str.upper,), {'columns': id},
    ...                              'mapper', 'rename')
    {'columns': <function id>, 'index': <method 'upper' of 'str' objects>}

    This emits a warning
    >>> df._validate_axis_style_args((str.upper, id), {},
    ...                              'mapper', 'rename')
    {'columns': <function id>, 'index': <method 'upper' of 'str' objects>}
    """
    # TODO: Change to keyword-only args and remove all this

    out = {}
    # Goal: fill 'out' with index/columns-style arguments
    # like out = {'index': foo, 'columns': bar}

    # Start by validating for consistency
    if "axis" in kwargs and any(x in kwargs for x in data._AXIS_TO_AXIS_NUMBER):
        msg = "Cannot specify both 'axis' and any of 'index' or 'columns'."
        raise TypeError(msg)

    # First fill with explicit values provided by the user...
    if arg_name in kwargs:
        if args:
            # e.g. rename(str.upper, columns=...) — the mapper was given twice.
            msg = f"{method_name} got multiple values for argument '{arg_name}'"
            raise TypeError(msg)

        axis = data._get_axis_name(kwargs.get("axis", 0))
        out[axis] = kwargs[arg_name]

    # More user-provided arguments, now from kwargs
    # Keys naming an axis ('index'/'columns'/aliases) are copied into 'out';
    # any other key (e.g. 'axis' itself, 'copy', ...) is left for the caller.
    for k, v in kwargs.items():
        try:
            ax = data._get_axis_name(k)
        except ValueError:
            pass
        else:
            out[ax] = v

    # All user-provided kwargs have been handled now.
    # Now we supplement with positional arguments, emitting warnings
    # when there's ambiguity and raising when there's conflicts

    if len(args) == 0:
        pass  # It's up to the function to decide if this is valid
    elif len(args) == 1:
        # Single positional value goes to the axis selected by 'axis'
        # (defaults to the index axis).
        axis = data._get_axis_name(kwargs.get("axis", 0))
        out[axis] = args[0]
    elif len(args) == 2:
        if "axis" in kwargs:
            # Unambiguously wrong
            msg = "Cannot specify both 'axis' and any of 'index' or 'columns'"
            raise TypeError(msg)

        msg = (
            f"Interpreting call\n\t'.{method_name}(a, b)' as "
            f"\n\t'.{method_name}(index=a, columns=b)'.\nUse named "
            "arguments to remove any ambiguity. In the future, using "
            "positional arguments for 'index' or 'columns' will raise "
            "a 'TypeError'."
        )
        # stacklevel=4 points the warning at the user's call site, above
        # the internal wrappers that route through this helper.
        warnings.warn(msg, FutureWarning, stacklevel=4)
        out[data._get_axis_name(0)] = args[0]
        out[data._get_axis_name(1)] = args[1]
    else:
        msg = f"Cannot specify all of '{arg_name}', 'index', 'columns'."
        raise TypeError(msg)
    return out
def validate_fillna_kwargs(value, method, validate_scalar_dict_value=True):
    """
    Validate the 'value'/'method' keyword pair accepted by 'fillna'.

    Exactly one of the two must be provided. When only 'method' is given,
    it is normalised through 'clean_fill_method'.

    Parameters
    ----------
    value, method : object
        The 'value' and 'method' keyword arguments for 'fillna'.
    validate_scalar_dict_value : bool, default True
        When True, reject list/tuple fill values (only scalars and dicts
        are acceptable).

    Returns
    -------
    value, method : object
    """
    from my_happy_pandas.core.missing import clean_fill_method

    if value is None:
        if method is None:
            raise ValueError("Must specify a fill 'value' or 'method'.")
        method = clean_fill_method(method)
    elif method is not None:
        raise ValueError("Cannot specify both 'value' and 'method'.")
    elif validate_scalar_dict_value and isinstance(value, (list, tuple)):
        raise TypeError(
            '"value" parameter must be a scalar or dict, but '
            f'you passed a "{type(value).__name__}"'
        )
    return value, method
def validate_percentile(q: Union[float, Iterable[float]]) -> np.ndarray:
    """
    Validate percentiles (used by describe and quantile).

    Parameters
    ----------
    q : float or iterable of floats
        A single percentile or an iterable of percentiles; every value
        must lie in the closed interval [0, 1].

    Returns
    -------
    ndarray
        The percentiles as an ndarray, when they are all valid.

    Raises
    ------
    ValueError
        If any percentile falls outside [0, 1].
    """
    result = np.asarray(q)
    # Plain string + .format keeps the common no-error path cheap; an
    # f-string would pay the formatting cost even when nothing is raised.
    msg = "percentiles should all be in the interval [0, 1]. Try {} instead."
    if result.ndim == 0:
        if not 0 <= result <= 1:
            raise ValueError(msg.format(result / 100.0))
    elif not all(0 <= entry <= 1 for entry in result):
        raise ValueError(msg.format(result / 100.0))
    return result
| [
"my_happy_pandas.core.dtypes.common.is_bool",
"my_happy_pandas.core.missing.clean_fill_method",
"numpy.asarray",
"warnings.warn"
] | [((13010, 13023), 'numpy.asarray', 'np.asarray', (['q'], {}), '(q)\n', (13020, 13023), True, 'import numpy as np\n'), ((7514, 7528), 'my_happy_pandas.core.dtypes.common.is_bool', 'is_bool', (['value'], {}), '(value)\n', (7521, 7528), False, 'from my_happy_pandas.core.dtypes.common import is_bool\n'), ((11962, 11987), 'my_happy_pandas.core.missing.clean_fill_method', 'clean_fill_method', (['method'], {}), '(method)\n', (11979, 11987), False, 'from my_happy_pandas.core.missing import clean_fill_method\n'), ((1844, 1858), 'my_happy_pandas.core.dtypes.common.is_bool', 'is_bool', (['match'], {}), '(match)\n', (1851, 1858), False, 'from my_happy_pandas.core.dtypes.common import is_bool\n'), ((10820, 10867), 'warnings.warn', 'warnings.warn', (['msg', 'FutureWarning'], {'stacklevel': '(4)'}), '(msg, FutureWarning, stacklevel=4)\n', (10833, 10867), False, 'import warnings\n')] |
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import math
import numpy
import torch
import sklearn
import sklearn.svm
import sklearn.externals
import sklearn.model_selection
import utils
import losses
import networks
class TimeSeriesEncoderClassifier(sklearn.base.BaseEstimator,
                                  sklearn.base.ClassifierMixin):
    """
    "Virtual" class to wrap an encoder of time series as a PyTorch module and
    a SVM classifier with RBF kernel on top of its computed representations in
    a scikit-learn class.

    All inheriting classes should implement the get_params and set_params
    methods, as in the recommendations of scikit-learn.

    @param compared_length Maximum length of randomly chosen time series. If
           None, this parameter is ignored.
    @param nb_random_samples Number of randomly chosen intervals to select the
           final negative sample in the loss.
    @param negative_penalty Multiplicative coefficient for the negative sample
           loss.
    @param batch_size Batch size used during the training of the encoder.
    @param nb_steps Number of optimization steps to perform for the training of
           the encoder.
    @param lr learning rate of the Adam optimizer used to train the encoder.
    @param penalty Penalty term for the SVM classifier. If None and if the
           number of samples is high enough, performs a hyperparameter search
           to find a suitable constant.
    @param early_stopping Enables, if not None, early stopping heuristic
           for the training of the representations, based on the final
           score. Representations are still learned unsupervisedly in this
           case. If the number of samples per class is no more than 10,
           disables this heuristic. If not None, accepts an integer
           representing the patience of the early stopping strategy.
    @param encoder Encoder PyTorch module.
    @param params Dictionaries of the parameters of the encoder.
    @param in_channels Number of input channels of the time series.
    @param cuda Transfers, if True, all computations to the GPU.
    @param gpu GPU index to use, if CUDA is enabled.
    """
    def __init__(self, compared_length, nb_random_samples, negative_penalty,
                 batch_size, nb_steps, lr, penalty, early_stopping,
                 encoder, params, in_channels, out_channels, cuda=False,
                 gpu=0):
        self.architecture = ''  # overridden by subclasses (e.g. 'CausalCNN')
        self.cuda = cuda
        self.gpu = gpu
        self.batch_size = batch_size
        self.nb_steps = nb_steps
        self.lr = lr
        self.penalty = penalty
        self.early_stopping = early_stopping
        self.encoder = encoder
        self.params = params  # kwargs to re-instantiate the encoder (early stopping)
        self.in_channels = in_channels
        self.out_channels = out_channels
        # Two triplet-loss variants: the second supports batches of time
        # series with unequal (NaN-padded) lengths.
        self.loss = losses.triplet_loss.TripletLoss(
            compared_length, nb_random_samples, negative_penalty
        )
        self.loss_varying = losses.triplet_loss.TripletLossVaryingLength(
            compared_length, nb_random_samples, negative_penalty
        )
        self.classifier = sklearn.svm.SVC()
        self.optimizer = torch.optim.Adam(self.encoder.parameters(), lr=lr)

    def save_encoder(self, prefix_file):
        """
        Saves the encoder and the SVM classifier.

        @param prefix_file Path and prefix of the file where the models should
               be saved (at '$(prefix_file)_$(architecture)_encoder.pth').
        """
        torch.save(
            self.encoder.state_dict(),
            prefix_file + '_' + self.architecture + '_encoder.pth'
        )

    def save(self, prefix_file):
        """
        Saves the encoder and the SVM classifier.

        @param prefix_file Path and prefix of the file where the models should
               be saved (at '$(prefix_file)_$(architecture)_classifier.pkl' and
               '$(prefix_file)_$(architecture)_encoder.pth').
        """
        self.save_encoder(prefix_file)
        # NOTE(review): sklearn.externals.joblib was removed in recent
        # scikit-learn releases; this requires an older version — confirm.
        sklearn.externals.joblib.dump(
            self.classifier,
            prefix_file + '_' + self.architecture + '_classifier.pkl'
        )

    def load_encoder(self, prefix_file):
        """
        Loads an encoder.

        @param prefix_file Path and prefix of the file where the model should
               be loaded (at '$(prefix_file)_$(architecture)_encoder.pth').
        """
        # map_location redirects tensors saved on any device onto the
        # configured GPU (or onto the CPU when CUDA is disabled).
        if self.cuda:
            self.encoder.load_state_dict(torch.load(
                prefix_file + '_' + self.architecture + '_encoder.pth',
                map_location=lambda storage, loc: storage.cuda(self.gpu)
            ))
        else:
            self.encoder.load_state_dict(torch.load(
                prefix_file + '_' + self.architecture + '_encoder.pth',
                map_location=lambda storage, loc: storage
            ))

    def load(self, prefix_file):
        """
        Loads an encoder and an SVM classifier.

        @param prefix_file Path and prefix of the file where the models should
               be loaded (at '$(prefix_file)_$(architecture)_classifier.pkl'
               and '$(prefix_file)_$(architecture)_encoder.pth').
        """
        self.load_encoder(prefix_file)
        self.classifier = sklearn.externals.joblib.load(
            prefix_file + '_' + self.architecture + '_classifier.pkl'
        )

    def fit_classifier(self, features, y):
        """
        Trains the classifier using precomputed features. Uses an SVM
        classifier with RBF kernel.

        @param features Computed features of the training set.
        @param y Training labels.
        """
        nb_classes = numpy.shape(numpy.unique(y, return_counts=True)[1])[0]
        train_size = numpy.shape(features)[0]
        # To use a 1-NN classifier, no need for model selection, simply
        # replace the code by the following:
        # import sklearn.neighbors
        # self.classifier = sklearn.neighbors.KNeighborsClassifier(
        #     n_neighbors=1
        # )
        # return self.classifier.fit(features, y)
        # C = 1/penalty when a positive penalty is given; otherwise an
        # (effectively unregularized) infinite C, refined by grid search below.
        self.classifier = sklearn.svm.SVC(
            C=1 / self.penalty
            if self.penalty is not None and self.penalty > 0
            else numpy.inf,
            gamma='scale'
        )
        if train_size // nb_classes < 5 or train_size < 50:
            # Too few samples per class for a reliable cross-validated search.
            return self.classifier.fit(features, y)
        else:
            if self.penalty is None:
                grid_search = sklearn.model_selection.GridSearchCV(
                    self.classifier, {
                        'C': [
                            0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000,
                            numpy.inf
                        ],
                        'kernel': ['rbf'],
                        'degree': [3],
                        'gamma': ['scale'],
                        'coef0': [0],
                        'shrinking': [True],
                        'probability': [False],
                        'tol': [0.001],
                        'cache_size': [200],
                        'class_weight': [None],
                        'verbose': [False],
                        'max_iter': [10000000],
                        'decision_function_shape': ['ovr'],
                        'random_state': [None]
                    },
                    cv=5, iid=False, n_jobs=5
                )
                if train_size <= 10000:
                    grid_search.fit(features, y)
                else:
                    # If the training set is too large, subsample 10000 train
                    # examples
                    split = sklearn.model_selection.train_test_split(
                        features, y,
                        train_size=10000, random_state=0, stratify=y
                    )
                    grid_search.fit(split[0], split[2])
                self.classifier = grid_search.best_estimator_
            # NOTE(review): when self.penalty is not None and the dataset is
            # large, the classifier returned here was never fitted on
            # (features, y) in this branch — confirm intended behaviour.
            return self.classifier

    def fit_encoder(self, X, y=None, save_memory=False, verbose=False):
        """
        Trains the encoder unsupervisedly using the given training data.

        @param X Training set.
        @param y Training labels, used only for early stopping, if enabled. If
               None, disables early stopping in the method.
        @param save_memory If True, enables to save GPU memory by propagating
               gradients after each loss term of the encoder loss, instead of
               doing it after computing the whole loss.
        @param verbose Enables, if True, to monitor which epoch is running in
               the encoder training.
        """
        # Check if the given time series have unequal lengths
        # (NaN anywhere in X marks padded, variable-length series).
        varying = bool(numpy.isnan(numpy.sum(X)))

        train = torch.from_numpy(X)
        if self.cuda:
            train = train.cuda(self.gpu)

        # ratio/train_size are only needed (and only defined) when labels
        # are available for the early-stopping heuristic.
        if y is not None:
            nb_classes = numpy.shape(numpy.unique(y, return_counts=True)[1])[0]
            train_size = numpy.shape(X)[0]
            ratio = train_size // nb_classes

        train_torch_dataset = utils.Dataset(X)
        train_generator = torch.utils.data.DataLoader(
            train_torch_dataset, batch_size=self.batch_size, shuffle=True
        )

        max_score = 0
        i = 0  # Number of performed optimization steps
        epochs = 0  # Number of performed epochs
        count = 0  # Count of number of epochs without improvement
        # Will be true if, by enabling epoch_selection, a model was selected
        # using cross-validation
        found_best = False

        # Encoder training
        while i < self.nb_steps:
            if verbose:
                print('Epoch: ', epochs + 1)
            for batch in train_generator:
                if self.cuda:
                    batch = batch.cuda(self.gpu)
                self.optimizer.zero_grad()
                if not varying:
                    loss = self.loss(
                        batch, self.encoder, train, save_memory=save_memory
                    )
                else:
                    loss = self.loss_varying(
                        batch, self.encoder, train, save_memory=save_memory
                    )
                loss.backward()
                self.optimizer.step()
                i += 1
                if i >= self.nb_steps:
                    break
            epochs += 1
            # Early stopping strategy
            if self.early_stopping is not None and y is not None and (
                ratio >= 5 and train_size >= 50
            ):
                # Computes the best regularization parameters
                features = self.encode(X)
                self.classifier = self.fit_classifier(features, y)
                # Cross validation score
                score = numpy.mean(sklearn.model_selection.cross_val_score(
                    self.classifier, features, y=y, cv=5, n_jobs=5
                ))
                count += 1
                # If the model is better than the previous one, update
                if score > max_score:
                    count = 0
                    found_best = True
                    max_score = score
                    # Snapshot the current weights into a fresh encoder
                    # instance rebuilt from the stored constructor params.
                    best_encoder = type(self.encoder)(**self.params)
                    best_encoder.double()
                    if self.cuda:
                        best_encoder.cuda(self.gpu)
                    best_encoder.load_state_dict(self.encoder.state_dict())
                if count == self.early_stopping:
                    break
        # If a better model was found, use it
        if found_best:
            self.encoder = best_encoder
        return self.encoder

    def fit(self, X, y, save_memory=False, verbose=False):
        """
        Trains sequentially the encoder unsupervisedly and then the classifier
        using the given labels over the learned features.

        @param X Training set.
        @param y Training labels.
        @param save_memory If True, enables to save GPU memory by propagating
               gradients after each loss term of the encoder loss, instead of
               doing it after computing the whole loss.
        @param verbose Enables, if True, to monitor which epoch is running in
               the encoder training.
        """
        # Fitting encoder
        self.encoder = self.fit_encoder(
            X, y=y, save_memory=save_memory, verbose=verbose
        )

        # SVM classifier training
        features = self.encode(X)
        self.classifier = self.fit_classifier(features, y)

        return self

    def encode(self, X, batch_size=50):
        """
        Outputs the representations associated to the input by the encoder.

        @param X Testing set.
        @param batch_size Size of batches used for splitting the test data to
               avoid out of memory errors when using CUDA. Ignored if the
               testing set contains time series of unequal lengths.
        """
        # Check if the given time series have unequal lengths
        varying = bool(numpy.isnan(numpy.sum(X)))

        test = utils.Dataset(X)
        # Variable-length series are processed one at a time so each can be
        # truncated to its own (non-NaN) length below.
        test_generator = torch.utils.data.DataLoader(
            test, batch_size=batch_size if not varying else 1
        )
        features = numpy.zeros((numpy.shape(X)[0], self.out_channels))
        self.encoder = self.encoder.eval()

        count = 0
        with torch.no_grad():
            if not varying:
                for batch in test_generator:
                    if self.cuda:
                        batch = batch.cuda(self.gpu)
                    features[
                        count * batch_size: (count + 1) * batch_size
                    ] = self.encoder(batch).cpu()
                    count += 1
            else:
                for batch in test_generator:
                    if self.cuda:
                        batch = batch.cuda(self.gpu)
                    # Effective length = full length minus NaN padding,
                    # measured on the first channel of the single series.
                    length = batch.size(2) - torch.sum(
                        torch.isnan(batch[0, 0])
                    ).data.cpu().numpy()
                    features[count: count + 1] = self.encoder(
                        batch[:, :, :length]
                    ).cpu()
                    count += 1

        self.encoder = self.encoder.train()
        return features

    def encode_window(self, X, window, batch_size=50, window_batch_size=10000):
        """
        Outputs the representations associated to the input by the encoder,
        for each subseries of the input of the given size (sliding window
        representations).

        @param X Testing set.
        @param window Size of the sliding window.
        @param batch_size Size of batches used for splitting the test data to
               avoid out of memory errors when using CUDA.
        @param window_batch_size Size of batches of windows to compute in a
               run of encode, to save RAM.
        """
        features = numpy.empty((
            numpy.shape(X)[0], self.out_channels,
            numpy.shape(X)[2] - window + 1
        ))
        # Reusable staging buffer holding up to window_batch_size windows.
        masking = numpy.empty((
            min(window_batch_size, numpy.shape(X)[2] - window + 1),
            numpy.shape(X)[1], window
        ))
        for b in range(numpy.shape(X)[0]):
            for i in range(math.ceil(
                (numpy.shape(X)[2] - window + 1) / window_batch_size)
            ):
                # Copy the windows of this chunk into the staging buffer...
                for j in range(
                    i * window_batch_size,
                    min(
                        (i + 1) * window_batch_size,
                        numpy.shape(X)[2] - window + 1
                    )
                ):
                    j0 = j - i * window_batch_size
                    masking[j0, :, :] = X[b, :, j: j + window]
                # ...then encode them in one call (only the j0+1 filled rows).
                features[
                    b, :, i * window_batch_size: (i + 1) * window_batch_size
                ] = numpy.swapaxes(
                    self.encode(masking[:j0 + 1], batch_size=batch_size), 0, 1
                )
        return features

    def predict(self, X, batch_size=50):
        """
        Outputs the class predictions for the given test data.

        @param X Testing set.
        @param batch_size Size of batches used for splitting the test data to
               avoid out of memory errors when using CUDA. Ignored if the
               testing set contains time series of unequal lengths.
        """
        features = self.encode(X, batch_size=batch_size)
        return self.classifier.predict(features)

    def score(self, X, y, batch_size=50):
        """
        Outputs accuracy of the SVM classifier on the given testing data.

        @param X Testing set.
        @param y Testing labels.
        @param batch_size Size of batches used for splitting the test data to
               avoid out of memory errors when using CUDA. Ignored if the
               testing set contains time series of unequal lengths.
        """
        features = self.encode(X, batch_size=batch_size)
        return self.classifier.score(features, y)
class CausalCNNEncoderClassifier(TimeSeriesEncoderClassifier):
"""
Wraps a causal CNN encoder of time series as a PyTorch module and a
SVM classifier on top of its computed representations in a scikit-learn
class.
@param compared_length Maximum length of randomly chosen time series. If
None, this parameter is ignored.
@param nb_random_samples Number of randomly chosen intervals to select the
final negative sample in the loss.
@param negative_penalty Multiplicative coefficient for the negative sample
loss.
@param batch_size Batch size used during the training of the encoder.
@param nb_steps Number of optimization steps to perform for the training of
the encoder.
@param lr learning rate of the Adam optimizer used to train the encoder.
@param penalty Penalty term for the SVM classifier. If None and if the
number of samples is high enough, performs a hyperparameter search
to find a suitable constant.
@param early_stopping Enables, if not None, early stopping heuristic
for the training of the representations, based on the final
score. Representations are still learned unsupervisedly in this
case. If the number of samples per class is no more than 10,
disables this heuristic. If not None, accepts an integer
representing the patience of the early stopping strategy.
@param channels Number of channels manipulated in the causal CNN.
@param depth Depth of the causal CNN.
@param reduced_size Fixed length to which the output time series of the
causal CNN is reduced.
@param out_channels Number of features in the final output.
@param kernel_size Kernel size of the applied non-residual convolutions.
@param in_channels Number of input channels of the time series.
@param cuda Transfers, if True, all computations to the GPU.
@param gpu GPU index to use, if CUDA is enabled.
"""
def __init__(self, compared_length=50, nb_random_samples=10,
negative_penalty=1, batch_size=1, nb_steps=2000, lr=0.001,
penalty=1, early_stopping=None, channels=10, depth=1,
reduced_size=10, out_channels=10, kernel_size=4,
in_channels=1, cuda=False, gpu=0):
super(CausalCNNEncoderClassifier, self).__init__(
compared_length, nb_random_samples, negative_penalty, batch_size,
nb_steps, lr, penalty, early_stopping,
self.__create_encoder(in_channels, channels, depth, reduced_size,
out_channels, kernel_size, cuda, gpu),
self.__encoder_params(in_channels, channels, depth, reduced_size,
out_channels, kernel_size),
in_channels, out_channels, cuda, gpu
)
self.architecture = 'CausalCNN'
self.channels = channels
self.depth = depth
self.reduced_size = reduced_size
self.kernel_size = kernel_size
def __create_encoder(self, in_channels, channels, depth, reduced_size,
out_channels, kernel_size, cuda, gpu):
encoder = networks.causal_cnn.CausalCNNEncoder(
in_channels, channels, depth, reduced_size, out_channels,
kernel_size
)
encoder.double()
if cuda:
encoder.cuda(gpu)
return encoder
def __encoder_params(self, in_channels, channels, depth, reduced_size,
out_channels, kernel_size):
return {
'in_channels': in_channels,
'channels': channels,
'depth': depth,
'reduced_size': reduced_size,
'out_channels': out_channels,
'kernel_size': kernel_size
}
def encode_sequence(self, X, batch_size=50):
"""
Outputs the representations associated to the input by the encoder,
from the start of the time series to each time step (i.e., the
evolution of the representations of the input time series with
repect to time steps).
Takes advantage of the causal CNN (before the max pooling), wich
ensures that its output at time step i only depends on time step i and
previous time steps.
@param X Testing set.
@param batch_size Size of batches used for splitting the test data to
avoid out of memory errors when using CUDA. Ignored if the
testing set contains time series of unequal lengths.
"""
# Check if the given time series have unequal lengths
varying = bool(numpy.isnan(numpy.sum(X)))
test = utils.Dataset(X)
test_generator = torch.utils.data.DataLoader(
test, batch_size=batch_size if not varying else 1
)
length = numpy.shape(X)[2]
features = numpy.full(
(numpy.shape(X)[0], self.out_channels, length), numpy.nan
)
self.encoder = self.encoder.eval()
causal_cnn = self.encoder.network[0]
linear = self.encoder.network[3]
count = 0
with torch.no_grad():
if not varying:
for batch in test_generator:
if self.cuda:
batch = batch.cuda(self.gpu)
# First applies the causal CNN
output_causal_cnn = causal_cnn(batch)
after_pool = torch.empty(
output_causal_cnn.size(), dtype=torch.double
)
if self.cuda:
after_pool = after_pool.cuda(self.gpu)
after_pool[:, :, 0] = output_causal_cnn[:, :, 0]
# Then for each time step, computes the output of the max
# pooling layer
for i in range(1, length):
after_pool[:, :, i] = torch.max(
torch.cat([
after_pool[:, :, i - 1: i],
output_causal_cnn[:, :, i: i+1]
], dim=2),
dim=2
)[0]
features[
count * batch_size: (count + 1) * batch_size, :, :
] = torch.transpose(linear(
torch.transpose(after_pool, 1, 2)
), 1, 2)
count += 1
else:
for batch in test_generator:
if self.cuda:
batch = batch.cuda(self.gpu)
length = batch.size(2) - torch.sum(
torch.isnan(batch[0, 0])
).data.cpu().numpy()
output_causal_cnn = causal_cnn(batch)
after_pool = torch.empty(
output_causal_cnn.size(), dtype=torch.double
)
if self.cuda:
after_pool = after_pool.cuda(self.gpu)
after_pool[:, :, 0] = output_causal_cnn[:, :, 0]
for i in range(1, length):
after_pool[:, :, i] = torch.max(
torch.cat([
after_pool[:, :, i - 1: i],
output_causal_cnn[:, :, i: i+1]
], dim=2),
dim=2
)[0]
features[
count: count + 1, :, :
] = torch.transpose(linear(
torch.transpose(after_pool, 1, 2)
), 1, 2)
count += 1
self.encoder = self.encoder.train()
return features
def get_params(self, deep=True):
return {
'compared_length': self.loss.compared_length,
'nb_random_samples': self.loss.nb_random_samples,
'negative_penalty': self.loss.negative_penalty,
'batch_size': self.batch_size,
'nb_steps': self.nb_steps,
'lr': self.lr,
'penalty': self.penalty,
'early_stopping': self.early_stopping,
'channels': self.channels,
'depth': self.depth,
'reduced_size': self.reduced_size,
'kernel_size': self.kernel_size,
'in_channels': self.in_channels,
'out_channels': self.out_channels,
'cuda': self.cuda,
'gpu': self.gpu
}
def set_params(self, compared_length, nb_random_samples, negative_penalty,
batch_size, nb_steps, lr, penalty, early_stopping,
channels, depth, reduced_size, out_channels, kernel_size,
in_channels, cuda, gpu):
self.__init__(
compared_length, nb_random_samples, negative_penalty, batch_size,
nb_steps, lr, penalty, early_stopping, channels, depth,
reduced_size, out_channels, kernel_size, in_channels, cuda, gpu
)
return self
class LSTMEncoderClassifier(TimeSeriesEncoderClassifier):
    """
    Scikit-learn-style wrapper combining an LSTM encoder of time series
    (a PyTorch module) with an SVM classifier fitted on the encoder's
    computed representations.

    @param compared_length Maximum length of randomly chosen time series. If
           None, this parameter is ignored.
    @param nb_random_samples Number of randomly chosen intervals used to pick
           the final negative sample in the loss.
    @param negative_penalty Multiplicative coefficient applied to the negative
           sample loss.
    @param batch_size Batch size used while training the encoder.
    @param nb_steps Number of optimization steps performed when training the
           encoder.
    @param lr Learning rate of the Adam optimizer used to train the encoder.
    @param penalty Penalty term for the SVM classifier. If None and if enough
           samples are available, a hyperparameter search is performed to
           find a suitable constant.
    @param early_stopping If not None, an integer giving the patience of an
           early stopping heuristic for representation training, based on the
           final score; representations are still learned unsupervisedly.
           Disabled when there are no more than 10 samples per class.
    @param in_channels Number of input channels of the time series.
    @param cuda Transfers, if True, all computations to the GPU.
    @param gpu GPU index to use, if CUDA is enabled.
    """
    def __init__(self, compared_length=50, nb_random_samples=10,
                 negative_penalty=1, batch_size=1, nb_steps=2000, lr=0.001,
                 penalty=1, early_stopping=None, in_channels=1, cuda=False,
                 gpu=0):
        super(LSTMEncoderClassifier, self).__init__(
            compared_length, nb_random_samples, negative_penalty, batch_size,
            nb_steps, lr, penalty, early_stopping,
            self.__create_encoder(cuda, gpu), {}, in_channels, 160, cuda, gpu
        )
        # The bundled LSTM encoder only handles univariate series.
        assert in_channels == 1
        self.architecture = 'LSTM'

    def __create_encoder(self, cuda, gpu):
        """Build the double-precision LSTM encoder, on GPU when requested."""
        net = networks.lstm.LSTMEncoder()
        net.double()
        if cuda:
            net.cuda(gpu)
        return net

    def get_params(self, deep=True):
        """Return this estimator's hyperparameters (scikit-learn API)."""
        loss = self.loss
        return {
            'compared_length': loss.compared_length,
            'nb_random_samples': loss.nb_random_samples,
            'negative_penalty': loss.negative_penalty,
            'batch_size': self.batch_size,
            'nb_steps': self.nb_steps,
            'lr': self.lr,
            'penalty': self.penalty,
            'early_stopping': self.early_stopping,
            'in_channels': self.in_channels,
            'cuda': self.cuda,
            'gpu': self.gpu
        }

    def set_params(self, compared_length, nb_random_samples, negative_penalty,
                   batch_size, nb_steps, lr, penalty, early_stopping,
                   in_channels, cuda, gpu):
        """Reinitialize the estimator with the given hyperparameters."""
        self.__init__(
            compared_length, nb_random_samples, negative_penalty, batch_size,
            nb_steps, lr, penalty, early_stopping, in_channels, cuda, gpu
        )
        return self
| [
"sklearn.model_selection.GridSearchCV",
"sklearn.externals.joblib.load",
"torch.from_numpy",
"losses.triplet_loss.TripletLoss",
"networks.causal_cnn.CausalCNNEncoder",
"torch.isnan",
"networks.lstm.LSTMEncoder",
"sklearn.model_selection.cross_val_score",
"sklearn.model_selection.train_test_split",
... | [((3578, 3667), 'losses.triplet_loss.TripletLoss', 'losses.triplet_loss.TripletLoss', (['compared_length', 'nb_random_samples', 'negative_penalty'], {}), '(compared_length, nb_random_samples,\n negative_penalty)\n', (3609, 3667), False, 'import losses\n'), ((3714, 3816), 'losses.triplet_loss.TripletLossVaryingLength', 'losses.triplet_loss.TripletLossVaryingLength', (['compared_length', 'nb_random_samples', 'negative_penalty'], {}), '(compared_length,\n nb_random_samples, negative_penalty)\n', (3758, 3816), False, 'import losses\n'), ((3861, 3878), 'sklearn.svm.SVC', 'sklearn.svm.SVC', ([], {}), '()\n', (3876, 3878), False, 'import sklearn\n'), ((4739, 4849), 'sklearn.externals.joblib.dump', 'sklearn.externals.joblib.dump', (['self.classifier', "(prefix_file + '_' + self.architecture + '_classifier.pkl')"], {}), "(self.classifier, prefix_file + '_' + self.\n architecture + '_classifier.pkl')\n", (4768, 4849), False, 'import sklearn\n'), ((5967, 6059), 'sklearn.externals.joblib.load', 'sklearn.externals.joblib.load', (["(prefix_file + '_' + self.architecture + '_classifier.pkl')"], {}), "(prefix_file + '_' + self.architecture +\n '_classifier.pkl')\n", (5996, 6059), False, 'import sklearn\n'), ((6808, 6927), 'sklearn.svm.SVC', 'sklearn.svm.SVC', ([], {'C': '(1 / self.penalty if self.penalty is not None and self.penalty > 0 else\n numpy.inf)', 'gamma': '"""scale"""'}), "(C=1 / self.penalty if self.penalty is not None and self.\n penalty > 0 else numpy.inf, gamma='scale')\n", (6823, 6927), False, 'import sklearn\n'), ((9474, 9493), 'torch.from_numpy', 'torch.from_numpy', (['X'], {}), '(X)\n', (9490, 9493), False, 'import torch\n'), ((9783, 9799), 'utils.Dataset', 'utils.Dataset', (['X'], {}), '(X)\n', (9796, 9799), False, 'import utils\n'), ((9826, 9920), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['train_torch_dataset'], {'batch_size': 'self.batch_size', 'shuffle': '(True)'}), '(train_torch_dataset, batch_size=self.batch_size,\n 
shuffle=True)\n', (9853, 9920), False, 'import torch\n'), ((13769, 13785), 'utils.Dataset', 'utils.Dataset', (['X'], {}), '(X)\n', (13782, 13785), False, 'import utils\n'), ((13811, 13889), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test'], {'batch_size': '(batch_size if not varying else 1)'}), '(test, batch_size=batch_size if not varying else 1)\n', (13838, 13889), False, 'import torch\n'), ((20879, 20990), 'networks.causal_cnn.CausalCNNEncoder', 'networks.causal_cnn.CausalCNNEncoder', (['in_channels', 'channels', 'depth', 'reduced_size', 'out_channels', 'kernel_size'], {}), '(in_channels, channels, depth,\n reduced_size, out_channels, kernel_size)\n', (20915, 20990), False, 'import networks\n'), ((22381, 22397), 'utils.Dataset', 'utils.Dataset', (['X'], {}), '(X)\n', (22394, 22397), False, 'import utils\n'), ((22423, 22501), 'torch.utils.data.DataLoader', 'torch.utils.data.DataLoader', (['test'], {'batch_size': '(batch_size if not varying else 1)'}), '(test, batch_size=batch_size if not varying else 1)\n', (22450, 22501), False, 'import torch\n'), ((29094, 29121), 'networks.lstm.LSTMEncoder', 'networks.lstm.LSTMEncoder', ([], {}), '()\n', (29119, 29121), False, 'import networks\n'), ((6447, 6468), 'numpy.shape', 'numpy.shape', (['features'], {}), '(features)\n', (6458, 6468), False, 'import numpy\n'), ((14058, 14073), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (14071, 14073), False, 'import torch\n'), ((22541, 22555), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (22552, 22555), False, 'import numpy\n'), ((22832, 22847), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (22845, 22847), False, 'import torch\n'), ((5416, 5529), 'torch.load', 'torch.load', (["(prefix_file + '_' + self.architecture + '_encoder.pth')"], {'map_location': '(lambda storage, loc: storage)'}), "(prefix_file + '_' + self.architecture + '_encoder.pth',\n map_location=lambda storage, loc: storage)\n", (5426, 5529), False, 'import torch\n'), ((7174, 
7628), 'sklearn.model_selection.GridSearchCV', 'sklearn.model_selection.GridSearchCV', (['self.classifier', "{'C': [0.0001, 0.001, 0.01, 0.1, 1, 10, 100, 1000, 10000, numpy.inf],\n 'kernel': ['rbf'], 'degree': [3], 'gamma': ['scale'], 'coef0': [0],\n 'shrinking': [True], 'probability': [False], 'tol': [0.001],\n 'cache_size': [200], 'class_weight': [None], 'verbose': [False],\n 'max_iter': [10000000], 'decision_function_shape': ['ovr'],\n 'random_state': [None]}"], {'cv': '(5)', 'iid': '(False)', 'n_jobs': '(5)'}), "(self.classifier, {'C': [0.0001, 0.001,\n 0.01, 0.1, 1, 10, 100, 1000, 10000, numpy.inf], 'kernel': ['rbf'],\n 'degree': [3], 'gamma': ['scale'], 'coef0': [0], 'shrinking': [True],\n 'probability': [False], 'tol': [0.001], 'cache_size': [200],\n 'class_weight': [None], 'verbose': [False], 'max_iter': [10000000],\n 'decision_function_shape': ['ovr'], 'random_state': [None]}, cv=5, iid=\n False, n_jobs=5)\n", (7210, 7628), False, 'import sklearn\n'), ((9442, 9454), 'numpy.sum', 'numpy.sum', (['X'], {}), '(X)\n', (9451, 9454), False, 'import numpy\n'), ((9689, 9703), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (9700, 9703), False, 'import numpy\n'), ((13738, 13750), 'numpy.sum', 'numpy.sum', (['X'], {}), '(X)\n', (13747, 13750), False, 'import numpy\n'), ((15882, 15896), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (15893, 15896), False, 'import numpy\n'), ((22350, 22362), 'numpy.sum', 'numpy.sum', (['X'], {}), '(X)\n', (22359, 22362), False, 'import numpy\n'), ((6383, 6418), 'numpy.unique', 'numpy.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (6395, 6418), False, 'import numpy\n'), ((8350, 8453), 'sklearn.model_selection.train_test_split', 'sklearn.model_selection.train_test_split', (['features', 'y'], {'train_size': '(10000)', 'random_state': '(0)', 'stratify': 'y'}), '(features, y, train_size=10000,\n random_state=0, stratify=y)\n', (8390, 8453), False, 'import sklearn\n'), ((11500, 11592), 
'sklearn.model_selection.cross_val_score', 'sklearn.model_selection.cross_val_score', (['self.classifier', 'features'], {'y': 'y', 'cv': '(5)', 'n_jobs': '(5)'}), '(self.classifier, features, y=y, cv=\n 5, n_jobs=5)\n', (11539, 11592), False, 'import sklearn\n'), ((13944, 13958), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (13955, 13958), False, 'import numpy\n'), ((15614, 15628), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (15625, 15628), False, 'import numpy\n'), ((15822, 15836), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (15833, 15836), False, 'import numpy\n'), ((22603, 22617), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (22614, 22617), False, 'import numpy\n'), ((9621, 9656), 'numpy.unique', 'numpy.unique', (['y'], {'return_counts': '(True)'}), '(y, return_counts=True)\n', (9633, 9656), False, 'import numpy\n'), ((15668, 15682), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (15679, 15682), False, 'import numpy\n'), ((24082, 24115), 'torch.transpose', 'torch.transpose', (['after_pool', '(1)', '(2)'], {}), '(after_pool, 1, 2)\n', (24097, 24115), False, 'import torch\n'), ((25352, 25385), 'torch.transpose', 'torch.transpose', (['after_pool', '(1)', '(2)'], {}), '(after_pool, 1, 2)\n', (25367, 25385), False, 'import torch\n'), ((15777, 15791), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (15788, 15791), False, 'import numpy\n'), ((23667, 23746), 'torch.cat', 'torch.cat', (['[after_pool[:, :, i - 1:i], output_causal_cnn[:, :, i:i + 1]]'], {'dim': '(2)'}), '([after_pool[:, :, i - 1:i], output_causal_cnn[:, :, i:i + 1]], dim=2)\n', (23676, 23746), False, 'import torch\n'), ((24965, 25044), 'torch.cat', 'torch.cat', (['[after_pool[:, :, i - 1:i], output_causal_cnn[:, :, i:i + 1]]'], {'dim': '(2)'}), '([after_pool[:, :, i - 1:i], output_causal_cnn[:, :, i:i + 1]], dim=2)\n', (24974, 25044), False, 'import torch\n'), ((15957, 15971), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (15968, 15971), False, 'import 
numpy\n'), ((16202, 16216), 'numpy.shape', 'numpy.shape', (['X'], {}), '(X)\n', (16213, 16216), False, 'import numpy\n'), ((14645, 14669), 'torch.isnan', 'torch.isnan', (['batch[0, 0]'], {}), '(batch[0, 0])\n', (14656, 14669), False, 'import torch\n'), ((24406, 24430), 'torch.isnan', 'torch.isnan', (['batch[0, 0]'], {}), '(batch[0, 0])\n', (24417, 24430), False, 'import torch\n')] |
import sys
# Make the shared pendulum-problem modules importable.
sys.path.append("./Pendulum-problem/pendulum_problem")
import numpy as np
import tensorflow as tf
from ddpg import DeepDeterministicPolicyGradients
from replay_buffer import ReplayBuffer
from neural_nets import ActorNet, CriticNet
from exploration import OrnsteinUhlenbeckActionNoise
from camera_environment import CameraEnvironment

# Hyperparameters for the DDPG experiments below.
MINIBATCH_SIZE = 16  # replay-batch size handed to the DDPG model
MAX_EPISODES = 3000  # training episodes per experiment run
CAMERA_FAILURE_PROB = 0.05  # failure probability given to CameraEnvironment
TAU = 0.001  # passed to ActorNet/CriticNet (presumably the soft target-update rate — confirm)
ACTOR_LEARNING_RATE = 0.0001
CRITIC_LEARNING_RATE = 0.001
GRADIENT_MAX_NORM = 5  # passed to CriticNet (presumably a gradient-clipping norm — confirm)
BUFFER_SIZE = 1000000  # replay buffer capacity
DISCOUNT_FACTOR = 0.95
SEED = 42  # seeds the RandomState that draws one seed per run
SEEDTORUN = 5  # number of independent seeded runs
def run_experiment(seed, postfix=""):
    """
    Train a DDPG agent on the camera environment for MAX_EPISODES episodes,
    logging per-episode rewards to TensorBoard, then record ten evaluation
    trajectories (camera and object positions) to ``test_run_*.npz`` files.

    @param seed Seed for weight initialization and the replay buffer.
    @param postfix Tag appended to log directories and output file names.
    @return Array of per-episode training rewards, shape (MAX_EPISODES,).
    """
    initializer = tf.keras.initializers.glorot_normal(seed)
    env = CameraEnvironment(CAMERA_FAILURE_PROB)
    action_size = env.action_size
    state_size = env.state_size
    action_bounds = [bound[1] for bound in env.action_bounds]

    def hidden_layers():
        # Shared hidden stack; called once per network so the actor and the
        # critic each get independent layer instances.
        return [
            tf.keras.layers.Dense(600, kernel_initializer=initializer),
            tf.keras.layers.BatchNormalization(),
            tf.keras.layers.ReLU(),
            tf.keras.layers.Dense(600, kernel_initializer=initializer,
                                  activation=tf.nn.relu),
        ]

    critic_layers = hidden_layers() + [
        tf.keras.layers.Dense(1, kernel_initializer=initializer)
    ]
    actor_layers = hidden_layers() + [
        tf.keras.layers.Dense(action_size, kernel_initializer=initializer,
                              activation=tf.nn.tanh)
    ]

    actor_net = ActorNet(actor_layers, action_bounds, TAU, ACTOR_LEARNING_RATE)
    critic_net = CriticNet(critic_layers, TAU, CRITIC_LEARNING_RATE,
                           GRADIENT_MAX_NORM)
    action_noise = OrnsteinUhlenbeckActionNoise(np.zeros((action_size,)), 0.3)
    replay_buffer = ReplayBuffer(BUFFER_SIZE, seed)
    model = DeepDeterministicPolicyGradients(actor_net, critic_net,
                                             action_noise, replay_buffer,
                                             action_size, state_size,
                                             DISCOUNT_FACTOR, MINIBATCH_SIZE)

    file_writer = tf.summary.create_file_writer(f"logs/{postfix}")
    file_writer.set_as_default()

    # Training phase: exploration via get_action plus replay updates.
    rewards = np.zeros((MAX_EPISODES,))
    for episode in range(MAX_EPISODES):
        state = env.reset()
        episode_reward = 0
        done = False
        while not done:
            action = model.get_action(state).reshape((-1,))
            next_state, reward, done, _ = env.step(action)
            model.add_to_buffer(np.squeeze(state), action, reward, done,
                                np.squeeze(next_state))
            model.update()
            state = next_state.copy()
            episode_reward += reward
        rewards[episode] = episode_reward
        text = 'Reward: {:.2f} |'.format(episode_reward)
        tf.summary.scalar('Episode reward', data=episode_reward, step=episode)
        tf.summary.text("Reward Logs", text, step=episode)
        file_writer.flush()
        print("Run {} | Episode: {:d} | {}".format(postfix, episode, text))

    # Evaluation phase: ten deterministic rollouts, positions saved to disk.
    for run in range(10):
        state = env.reset()
        camera_pos = []
        object_pos = []
        done = False
        while not done:
            action = model.actor_predict(state).reshape((-1,))
            next_state, reward, done, info = env.step(action)
            camera_pos.append(info["cam_pos"].copy())
            object_pos.append(info["obj_pos"].copy())
            state = next_state.copy()
        np.savez(f"test_run_{run}_{postfix}.npz",
                 camera_pos=np.concatenate(camera_pos, axis=0),
                 object_pos=np.concatenate(object_pos, axis=0))
    return rewards
# Limit each visible GPU to a 3 GiB logical device so that several
# experiment processes can share the hardware.
gpus = tf.config.list_physical_devices('GPU')
for gpu in gpus:
    tf.config.set_logical_device_configuration(
        gpu,
        [tf.config.LogicalDeviceConfiguration(memory_limit=3 * 1024)])

# Run the experiment once per drawn seed and collect per-episode rewards.
rewards = np.zeros((SEEDTORUN, MAX_EPISODES))
random_generator = np.random.RandomState(SEED)
for i in range(SEEDTORUN):
    seed = random_generator.randint(1000000)
    r = run_experiment(seed, f"seed_{i}")
    rewards[i, :] = r.copy()
np.savez(f"results_chance_{CAMERA_FAILURE_PROB}.npz", rewards=rewards)
| [
"replay_buffer.ReplayBuffer",
"camera_environment.CameraEnvironment",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.config.list_physical_devices",
"neural_nets.CriticNet",
"tensorflow.keras.layers.Dense",
"tensorflow.config.LogicalDeviceConfiguration",
"sys.path.append",
"numpy.random.Ra... | [((11, 65), 'sys.path.append', 'sys.path.append', (['"""./Pendulum-problem/pendulum_problem"""'], {}), "('./Pendulum-problem/pendulum_problem')\n", (26, 65), False, 'import sys\n'), ((3886, 3924), 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), "('GPU')\n", (3917, 3924), True, 'import tensorflow as tf\n'), ((4115, 4150), 'numpy.zeros', 'np.zeros', (['(SEEDTORUN, MAX_EPISODES)'], {}), '((SEEDTORUN, MAX_EPISODES))\n', (4123, 4150), True, 'import numpy as np\n'), ((4170, 4197), 'numpy.random.RandomState', 'np.random.RandomState', (['SEED'], {}), '(SEED)\n', (4191, 4197), True, 'import numpy as np\n'), ((4342, 4412), 'numpy.savez', 'np.savez', (['f"""results_chance_{CAMERA_FAILURE_PROB}.npz"""'], {'rewards': 'rewards'}), "(f'results_chance_{CAMERA_FAILURE_PROB}.npz', rewards=rewards)\n", (4350, 4412), True, 'import numpy as np\n'), ((631, 672), 'tensorflow.keras.initializers.glorot_normal', 'tf.keras.initializers.glorot_normal', (['seed'], {}), '(seed)\n', (666, 672), True, 'import tensorflow as tf\n'), ((691, 729), 'camera_environment.CameraEnvironment', 'CameraEnvironment', (['CAMERA_FAILURE_PROB'], {}), '(CAMERA_FAILURE_PROB)\n', (708, 729), False, 'from camera_environment import CameraEnvironment\n'), ((1783, 1853), 'neural_nets.ActorNet', 'ActorNet', (['ACTOR_NET_STRUCTURE', 'action_bounds', 'TAU', 'ACTOR_LEARNING_RATE'], {}), '(ACTOR_NET_STRUCTURE, action_bounds, TAU, ACTOR_LEARNING_RATE)\n', (1791, 1853), False, 'from neural_nets import ActorNet, CriticNet\n'), ((1871, 1948), 'neural_nets.CriticNet', 'CriticNet', (['CRITIC_NET_STRUCTURE', 'TAU', 'CRITIC_LEARNING_RATE', 'GRADIENT_MAX_NORM'], {}), '(CRITIC_NET_STRUCTURE, TAU, CRITIC_LEARNING_RATE, GRADIENT_MAX_NORM)\n', (1880, 1948), False, 'from neural_nets import ActorNet, CriticNet\n'), ((2049, 2080), 'replay_buffer.ReplayBuffer', 'ReplayBuffer', (['BUFFER_SIZE', 'seed'], {}), '(BUFFER_SIZE, seed)\n', (2061, 2080), False, 'from 
replay_buffer import ReplayBuffer\n'), ((2093, 2239), 'ddpg.DeepDeterministicPolicyGradients', 'DeepDeterministicPolicyGradients', (['actor_net', 'critic_net', 'action_noise', 'replay_buffer', 'action_size', 'state_size', 'DISCOUNT_FACTOR', 'MINIBATCH_SIZE'], {}), '(actor_net, critic_net, action_noise,\n replay_buffer, action_size, state_size, DISCOUNT_FACTOR, MINIBATCH_SIZE)\n', (2125, 2239), False, 'from ddpg import DeepDeterministicPolicyGradients\n'), ((2331, 2368), 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', (['logdir'], {}), '(logdir)\n', (2360, 2368), True, 'import tensorflow as tf\n'), ((2416, 2441), 'numpy.zeros', 'np.zeros', (['(MAX_EPISODES,)'], {}), '((MAX_EPISODES,))\n', (2424, 2441), True, 'import numpy as np\n'), ((903, 961), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(600)'], {'kernel_initializer': 'kernel_init'}), '(600, kernel_initializer=kernel_init)\n', (924, 961), True, 'import tensorflow as tf\n'), ((991, 1027), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1025, 1027), True, 'import tensorflow as tf\n'), ((1057, 1079), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (1077, 1079), True, 'import tensorflow as tf\n'), ((1109, 1195), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(600)'], {'kernel_initializer': 'kernel_init', 'activation': 'tf.nn.relu'}), '(600, kernel_initializer=kernel_init, activation=tf.nn\n .relu)\n', (1130, 1195), True, 'import tensorflow as tf\n'), ((1220, 1276), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': 'kernel_init'}), '(1, kernel_initializer=kernel_init)\n', (1241, 1276), True, 'import tensorflow as tf\n'), ((1334, 1392), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(600)'], {'kernel_initializer': 'kernel_init'}), '(600, kernel_initializer=kernel_init)\n', (1355, 1392), True, 'import tensorflow as tf\n'), ((1421, 
1457), 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), '()\n', (1455, 1457), True, 'import tensorflow as tf\n'), ((1486, 1508), 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), '()\n', (1506, 1508), True, 'import tensorflow as tf\n'), ((1537, 1623), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(600)'], {'kernel_initializer': 'kernel_init', 'activation': 'tf.nn.relu'}), '(600, kernel_initializer=kernel_init, activation=tf.nn\n .relu)\n', (1558, 1623), True, 'import tensorflow as tf\n'), ((1647, 1740), 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['action_size'], {'kernel_initializer': 'kernel_init', 'activation': 'tf.nn.tanh'}), '(action_size, kernel_initializer=kernel_init,\n activation=tf.nn.tanh)\n', (1668, 1740), True, 'import tensorflow as tf\n'), ((1998, 2022), 'numpy.zeros', 'np.zeros', (['(action_size,)'], {}), '((action_size,))\n', (2006, 2022), True, 'import numpy as np\n'), ((2992, 3051), 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Episode reward"""'], {'data': 'ep_reward', 'step': 'i'}), "('Episode reward', data=ep_reward, step=i)\n", (3009, 3051), True, 'import tensorflow as tf\n'), ((3060, 3104), 'tensorflow.summary.text', 'tf.summary.text', (['"""Reward Logs"""', 'text'], {'step': 'i'}), "('Reward Logs', text, step=i)\n", (3075, 3104), True, 'import tensorflow as tf\n'), ((3672, 3706), 'numpy.concatenate', 'np.concatenate', (['camera_pos'], {'axis': '(0)'}), '(camera_pos, axis=0)\n', (3686, 3706), True, 'import numpy as np\n'), ((3728, 3762), 'numpy.concatenate', 'np.concatenate', (['object_pos'], {'axis': '(0)'}), '(object_pos, axis=0)\n', (3742, 3762), True, 'import numpy as np\n'), ((3771, 3861), 'numpy.savez', 'np.savez', (['f"""test_run_{i}_{postfix}.npz"""'], {'camera_pos': 'camera_pos', 'object_pos': 'object_pos'}), "(f'test_run_{i}_{postfix}.npz', camera_pos=camera_pos, object_pos=\n object_pos)\n", (3779, 3861), True, 'import numpy as 
np\n'), ((4042, 4101), 'tensorflow.config.LogicalDeviceConfiguration', 'tf.config.LogicalDeviceConfiguration', ([], {'memory_limit': '(3 * 1024)'}), '(memory_limit=3 * 1024)\n', (4078, 4101), True, 'import tensorflow as tf\n'), ((2715, 2732), 'numpy.squeeze', 'np.squeeze', (['state'], {}), '(state)\n', (2725, 2732), True, 'import numpy as np\n'), ((2743, 2765), 'numpy.squeeze', 'np.squeeze', (['next_state'], {}), '(next_state)\n', (2753, 2765), True, 'import numpy as np\n')] |
import os
import json
import random
import numpy as np
import tensorflow as tf
from dataset import DataProcessor, get_dataset
class BeerProcessor(DataProcessor):
    """
    Data processor for the Beer review dataset (binary labels "0"/"1").
    """

    def get_train_examples(self, data_dir):
        """Load training examples from ``train.tsv`` in ``data_dir``."""
        lines = self._read_tsv(os.path.join(data_dir, "train.tsv"))
        return self._create_examples(lines)

    def get_dev_examples(self, data_dir):
        """Load validation examples from ``dev.tsv`` in ``data_dir``."""
        lines = self._read_tsv(os.path.join(data_dir, "dev.tsv"))
        return self._create_examples(lines)

    def get_labels(self):
        """Return the list of possible class labels."""
        return ["0", "1"]

    def _create_examples(self, lines):
        """Turn raw TSV rows into dicts with text and a one-hot label."""
        num_classes = len(self.get_labels())
        examples = []
        # The first row is the header; skip it.
        for line in lines[1:]:
            text = self._convert_to_unicode(line[0])
            label = int(self._convert_to_unicode(line[1]))
            one_hot = [0] * num_classes
            one_hot[label] = 1
            examples.append({"text": text, "label": one_hot})
        return examples
def get_beer_dataset(data_dir, max_seq_length, word_threshold, balance=False):
    """
    Return tf datasets (train and dev) and the language index
    for the beer dataset.

    Assumes ``train.tsv`` and ``dev.tsv`` are in ``data_dir``.

    @param data_dir Directory containing train.tsv and dev.tsv.
    @param max_seq_length Passed through to ``get_dataset``.
    @param word_threshold Passed through to ``get_dataset``.
    @param balance If True, down-sample the majority class (with a fixed
           random seed) so both training classes have the same size.
    """
    processor = BeerProcessor()
    train_examples = processor.get_train_examples(data_dir)
    dev_examples = processor.get_dev_examples(data_dir)

    print("Dataset: Beer Review")
    # Fixed typo in the original message ("sampels" -> "samples").
    print("Training samples %d, Validation samples %d" %
          (len(train_examples), len(dev_examples)))

    # Count examples per class; index 0 is negative, index 1 positive
    # (labels are one-hot, so summing them counts each class).
    train_labels = np.array([0., 0.])
    for train_example in train_examples:
        train_labels += train_example["label"]
    print("Training data: %d positive examples, %d negative examples." %
          (train_labels[1], train_labels[0]))

    dev_labels = np.array([0., 0.])
    for dev_example in dev_examples:
        dev_labels += dev_example["label"]
    print("Dev data: %d positive examples, %d negative examples." %
          (dev_labels[1], dev_labels[0]))

    if balance:  # idiomatic truth test instead of "== True"
        random.seed(12252018)
        print("Make the Training dataset class balanced.")
        # Down-sample the majority class to the minority-class size.
        min_examples = int(min(train_labels[0], train_labels[1]))
        neg_examples = [ex for ex in train_examples if ex["label"][0] == 1]
        pos_examples = [ex for ex in train_examples if ex["label"][0] != 1]
        assert len(neg_examples) == train_labels[0]
        assert len(pos_examples) == train_labels[1]
        if train_labels[0] >= train_labels[1]:
            # More negative examples: down-sample the negatives.
            neg_examples = random.sample(neg_examples, min_examples)
        else:
            # More positive examples: down-sample the positives.
            pos_examples = random.sample(pos_examples, min_examples)
        assert len(pos_examples) == len(neg_examples)
        train_examples = pos_examples + neg_examples
        print(
            "After balance training data: %d positive examples, %d negative examples."
            % (len(pos_examples), len(neg_examples)))

    return get_dataset(train_examples, dev_examples, max_seq_length,
                       word_threshold)
def get_big_beer_dataset(data_dir, max_seq_length, word_threshold, balance=False):
    """
    Return tf datasets (train and dev) and the language index
    for the beer dataset.

    Assumes ``train.tsv`` and ``dev.tsv`` are in ``data_dir``.

    Unlike ``get_beer_dataset``, balancing here OVER-samples the minority
    class (sampling with replacement) up to the majority-class size instead
    of down-sampling the majority class.

    @param data_dir Directory containing train.tsv and dev.tsv.
    @param max_seq_length Passed through to ``get_dataset``.
    @param word_threshold Passed through to ``get_dataset``.
    @param balance If True, over-sample the minority class (with a fixed
           random seed) so both training classes have the same size.
    """
    processor = BeerProcessor()
    train_examples = processor.get_train_examples(data_dir)
    dev_examples = processor.get_dev_examples(data_dir)

    print("Dataset: Beer Review")
    # Fixed typo in the original message ("sampels" -> "samples").
    print("Training samples %d, Validation samples %d" %
          (len(train_examples), len(dev_examples)))

    # Count examples per class; index 0 is negative, index 1 positive.
    train_labels = np.array([0., 0.])
    for train_example in train_examples:
        train_labels += train_example["label"]
    print("Training data: %d positive examples, %d negative examples." %
          (train_labels[1], train_labels[0]))

    dev_labels = np.array([0., 0.])
    for dev_example in dev_examples:
        dev_labels += dev_example["label"]
    print("Dev data: %d positive examples, %d negative examples." %
          (dev_labels[1], dev_labels[0]))

    if balance:  # idiomatic truth test instead of "== True"
        random.seed(12252018)
        print("Make the Training dataset class balanced.")
        # Over-sample the minority class up to the majority-class size.
        max_examples = int(max(train_labels[0], train_labels[1]))
        neg_examples = [ex for ex in train_examples if ex["label"][0] == 1]
        pos_examples = [ex for ex in train_examples if ex["label"][0] != 1]
        assert len(neg_examples) == train_labels[0]
        assert len(pos_examples) == train_labels[1]
        # NOTE: the original branch comments were swapped; corrected below.
        # random.choice consumes the RNG exactly like the original
        # randint-indexing loop, so the seeded samples are unchanged.
        if train_labels[0] >= train_labels[1]:
            # More negative examples: over-sample the positives
            # with replacement.
            pos_examples = [random.choice(pos_examples)
                            for _ in range(max_examples)]
        else:
            # More positive examples: over-sample the negatives
            # with replacement.
            neg_examples = [random.choice(neg_examples)
                            for _ in range(max_examples)]
        assert len(pos_examples) == len(neg_examples)
        train_examples = pos_examples + neg_examples
        print(
            "After balance training data: %d positive examples, %d negative examples."
            % (len(pos_examples), len(neg_examples)))

    return get_dataset(train_examples, dev_examples, max_seq_length,
                       word_threshold)
def get_beer_annotation(annotation_path,
                        aspect,
                        max_seq_length,
                        word2idx,
                        neg_thres=0.4,
                        pos_thres=0.6):
    """
    Read annotation from json and
    return tf datasets of the beer annotation.
    fpath -- annotations.json (one json object per line)
    aspects:
        0 -- appearance
        1 -- aroma
        2 -- palate
    rating >= pos_thres --> 1, rating <= neg_thres --> 0, other discarded.
    Examples with an empty rationale for the requested aspect are skipped.
    Outputs:
        data -- (num_examples, max_seq_length).
        masks -- (num_examples, max_seq_length).
        labels -- (num_examples, num_classes) in a one-hot format.
        rationales -- binary sequence (num_examples, sequence_length)
    """
    data = []
    labels = []
    masks = []
    rationales = []
    num_classes = 2
    with open(annotation_path, "rt") as fin:
        for line in fin:
            item = json.loads(line)
            # obtain the data
            text_ = item["x"]
            y = item["y"][aspect]
            rationale = item[str(aspect)]
            # check if the rationale is all zero
            if len(rationale) == 0:
                # no rationale for this aspect
                continue
            # process the label: binarize the rating, drop ambiguous ones
            if float(y) >= pos_thres:
                y = 1
            elif float(y) <= neg_thres:
                y = 0
            else:
                continue
            one_hot_label = [0] * num_classes
            one_hot_label[y] = 1
            # process the text: truncate and map tokens to vocabulary ids
            input_ids = []
            if len(text_) > max_seq_length:
                text_ = text_[0:max_seq_length]
            for word in text_:
                word = word.strip()
                try:
                    input_ids.append(word2idx[word])
                except KeyError:
                    # word is not in word2idx, use the <unknown> token
                    input_ids.append(word2idx["<unknown>"])
            # process mask
            # The mask has 1 for real word and 0 for padding tokens.
            input_mask = [1] * len(input_ids)
            # zero-pad up to the max_seq_length.
            while len(input_ids) < max_seq_length:
                input_ids.append(0)
                input_mask.append(0)
            assert (len(input_ids) == max_seq_length)
            assert (len(input_mask) == max_seq_length)
            # construct rationale: each annotated span [start, end) marks
            # rationale tokens; clip spans to max_seq_length
            binary_rationale = [0] * len(input_ids)
            for zs in rationale:
                start = zs[0]
                end = zs[1]
                if start >= max_seq_length:
                    continue
                if end >= max_seq_length:
                    end = max_seq_length
                for idx in range(start, end):
                    binary_rationale[idx] = 1
            data.append(input_ids)
            labels.append(one_hot_label)
            masks.append(input_mask)
            rationales.append(binary_rationale)
    data = np.array(data, dtype=np.int32)
    labels = np.array(labels, dtype=np.int32)
    masks = np.array(masks, dtype=np.int32)
    rationales = np.array(rationales, dtype=np.int32)
    label_dis = np.sum(labels, axis=0)
    print("Annotated rationales: %d" % data.shape[0])
    print("Annotated data: %d positive examples, %d negative examples." %
          (label_dis[1], label_dis[0]))
    annotated_dataset = tf.data.Dataset.from_tensor_slices(
        (data, masks, labels, rationales))
    return annotated_dataset
| [
"random.sample",
"json.loads",
"tensorflow.data.Dataset.from_tensor_slices",
"os.path.join",
"dataset.get_dataset",
"random.seed",
"numpy.array",
"numpy.sum"
] | [((1664, 1684), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1672, 1684), True, 'import numpy as np\n'), ((1908, 1928), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (1916, 1928), True, 'import numpy as np\n'), ((3282, 3355), 'dataset.get_dataset', 'get_dataset', (['train_examples', 'dev_examples', 'max_seq_length', 'word_threshold'], {}), '(train_examples, dev_examples, max_seq_length, word_threshold)\n', (3293, 3355), False, 'from dataset import DataProcessor, get_dataset\n'), ((3954, 3974), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (3962, 3974), True, 'import numpy as np\n'), ((4198, 4218), 'numpy.array', 'np.array', (['[0.0, 0.0]'], {}), '([0.0, 0.0])\n', (4206, 4218), True, 'import numpy as np\n'), ((5818, 5891), 'dataset.get_dataset', 'get_dataset', (['train_examples', 'dev_examples', 'max_seq_length', 'word_threshold'], {}), '(train_examples, dev_examples, max_seq_length, word_threshold)\n', (5829, 5891), False, 'from dataset import DataProcessor, get_dataset\n'), ((2151, 2172), 'random.seed', 'random.seed', (['(12252018)'], {}), '(12252018)\n', (2162, 2172), False, 'import random\n'), ((4441, 4462), 'random.seed', 'random.seed', (['(12252018)'], {}), '(12252018)\n', (4452, 4462), False, 'import random\n'), ((8937, 8967), 'numpy.array', 'np.array', (['data'], {'dtype': 'np.int32'}), '(data, dtype=np.int32)\n', (8945, 8967), True, 'import numpy as np\n'), ((8985, 9017), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int32'}), '(labels, dtype=np.int32)\n', (8993, 9017), True, 'import numpy as np\n'), ((9034, 9065), 'numpy.array', 'np.array', (['masks'], {'dtype': 'np.int32'}), '(masks, dtype=np.int32)\n', (9042, 9065), True, 'import numpy as np\n'), ((9087, 9123), 'numpy.array', 'np.array', (['rationales'], {'dtype': 'np.int32'}), '(rationales, dtype=np.int32)\n', (9095, 9123), True, 'import numpy as np\n'), ((9145, 9167), 'numpy.sum', 'np.sum', (['labels'], {'axis': '(0)'}), 
'(labels, axis=0)\n', (9151, 9167), True, 'import numpy as np\n'), ((9377, 9446), 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(data, masks, labels, rationales)'], {}), '((data, masks, labels, rationales))\n', (9411, 9446), True, 'import tensorflow as tf\n'), ((2842, 2883), 'random.sample', 'random.sample', (['neg_examples', 'min_examples'], {}), '(neg_examples, min_examples)\n', (2855, 2883), False, 'import random\n'), ((2962, 3003), 'random.sample', 'random.sample', (['pos_examples', 'min_examples'], {}), '(pos_examples, min_examples)\n', (2975, 3003), False, 'import random\n'), ((6866, 6882), 'json.loads', 'json.loads', (['line'], {}), '(line)\n', (6876, 6882), False, 'import json\n'), ((327, 362), 'os.path.join', 'os.path.join', (['data_dir', '"""train.tsv"""'], {}), "(data_dir, 'train.tsv')\n", (339, 362), False, 'import os\n'), ((473, 506), 'os.path.join', 'os.path.join', (['data_dir', '"""dev.tsv"""'], {}), "(data_dir, 'dev.tsv')\n", (485, 506), False, 'import os\n')] |
import os
import datetime
from typing import List, Union, Dict
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
import torch
from torch import nn
from torch.utils.tensorboard import SummaryWriter
from torch import device as torchDevice
from genEM3.util import gpu
from genEM3.training.metrics import Metrics
class Trainer:
    """Training harness for a binary (clean vs. debris) image classifier.

    Runs train/validation epochs over the given data loaders, logs running
    and per-epoch metrics plus example figures to TensorBoard, writes
    confusion/prediction tables per epoch, and optionally checkpoints the
    model under ``<run_root>/.log/<run_name>/epoch_XX``.
    """

    def __init__(self,
                 run_name: str,
                 run_root: str,
                 model: nn.Module,
                 optimizer: torch.optim.Optimizer,
                 criterion: nn.MSELoss,
                 data_loaders: Union[List, Dict],
                 num_epoch: int = 100,
                 log_int: int = 10,
                 device: str = 'cpu',
                 save: bool = False,
                 save_int: int = 1,
                 resume: bool = False,
                 gpu_id: int = None,
                 balance_factor: List = None):
        """balance_factor: is a list which contains the balance factor for each training epoch

        data_loaders is either a dict mapping phase name ('train'/'test'...)
        to a loader, or a list of such dicts (one per epoch segment).
        """
        self.run_name = run_name
        self.run_root = run_root
        self.model = model
        self.optimizer = optimizer
        self.criterion = criterion
        self.num_epoch = num_epoch
        self.log_int = log_int
        self.save = save
        self.save_int = save_int
        self.resume = resume
        self.balance_factor = balance_factor
        if device == 'cuda':
            gpu.get_gpu(gpu_id)
            device = torch.device(torch.cuda.current_device())
        self.device = torchDevice(device)
        self.log_root = os.path.join(run_root, '.log', run_name)
        self.data_loaders = data_loaders
        # Phase name -> number of batches; can only be computed when a single
        # set of data loaders (dict) is used.
        if isinstance(data_loaders, dict):
            # BUGFIX: the original iterated the dict itself (yielding keys),
            # so len() measured the phase-name strings instead of the loaders.
            self.data_lengths = {name: len(loader)
                                 for name, loader in self.data_loaders.items()}
        else:
            self.data_lengths = {}
        if save:
            if not os.path.exists(self.log_root):
                os.makedirs(self.log_root)

    def train(self):
        """Run the full train/validation loop for ``num_epoch`` epochs.

        Assumes ``self.model`` carries ``epoch`` and ``iteration`` parameters
        (restored or initialized elsewhere) used to continue the counters.
        """
        if self.resume:
            print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ') Resuming training ... ')
            checkpoint = torch.load(os.path.join(self.log_root, 'torch_model'))
            self.model.load_state_dict(checkpoint['model_state_dict'])
            self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        else:
            print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ') Starting training ... ')
        writer = SummaryWriter(self.log_root)
        self.model = self.model.to(self.device)
        epoch = int(self.model.epoch) + 1
        it = int(self.model.iteration)
        for epoch in range(epoch, epoch + self.num_epoch):
            if isinstance(self.data_loaders, list):
                # each element of the list is a data loader for an epoch
                loader_change_interval = self.num_epoch / len(self.data_loaders)
                division_index, _ = divmod(epoch, loader_change_interval)
                # Make sure that the index does not exceed the length of the data_loader list
                index = round(min(division_index, len(self.data_loaders)-1))
                cur_data_loaders = self.data_loaders[index]
            else:
                # same dataloaders for all epochs
                cur_data_loaders = self.data_loaders
            # Dictionary (of dictionaries) to collect four metrics from different phases for tensorboard
            epoch_metric_names = ['epoch_loss', 'epoch_accuracy', 'precision/PPV', 'recall/TPR']
            epoch_metric_dict = {metric_name: dict.fromkeys(cur_data_loaders.keys()) for metric_name in epoch_metric_names}
            epoch_root = 'epoch_{:02d}'.format(epoch)
            if not os.path.exists(os.path.join(self.log_root, epoch_root)):
                os.makedirs(os.path.join(self.log_root, epoch_root))
            for phase in cur_data_loaders.keys():
                # enable/disable dropout, batch-norm updates etc.
                if phase == 'train':
                    self.model.train(True)
                else:
                    self.model.train(False)
                epoch_loss = 0
                running_loss = 0.0
                target_sum = 0
                predicted_sum = 0
                correct_sum = 0
                batch_idx_start = 0
                num_items = len(cur_data_loaders[phase].batch_sampler.sampler)
                # Pre-allocate per-phase buffers, initialized to -1 so that
                # unfilled entries are recognizable.
                # NOTE(review): assumes 1x140x140 inputs — confirm against the dataset.
                inputs_phase = -np.ones((num_items, 1, 140, 140)).astype(float)
                outputs_phase = -np.ones((num_items, self.model.classifier.num_output)).astype(float)
                predictions_phase = -np.ones(num_items).astype(int)
                targets_phase = -np.ones(num_items).astype(int)
                correct_phase = -np.ones(num_items).astype(int)
                sample_ind_phase = []
                for i, data in enumerate(cur_data_loaders[phase]):
                    it += 1
                    # copy input and targets to the device object
                    inputs = data['input'].to(self.device)
                    targets = data['target'].to(self.device)
                    sample_ind_batch = data['sample_idx']
                    sample_ind_phase.extend(sample_ind_batch)
                    # zero the parameter gradients
                    self.optimizer.zero_grad()
                    # forward + backward + optimize (backward only in train phase)
                    outputs = self.model(inputs).squeeze()
                    loss = self.criterion(outputs, targets)
                    if phase == 'train':
                        loss.backward()
                        self.optimizer.step()
                    inputs, outputs, targets = Trainer.copy2cpu(inputs, outputs, targets)
                    # outputs are log-probabilities; exp() before argmax
                    predicted_classes = np.argmax(np.exp(outputs.detach().numpy()), axis=1)
                    predicted_sum += np.sum(predicted_classes)
                    target_classes = targets.detach().numpy()
                    target_sum += np.sum(target_classes)
                    correct_classes = predicted_classes == target_classes
                    correct_sum += np.sum(correct_classes)
                    # advance the write window into the per-phase buffers
                    if i > 0:
                        batch_idx_start = batch_idx_end
                    batch_idx_end = batch_idx_start + len(targets)
                    inputs_phase[batch_idx_start:batch_idx_end, :, :, :] = inputs.detach().numpy()
                    outputs_phase[batch_idx_start:batch_idx_end, :] = outputs.detach().numpy()
                    predictions_phase[batch_idx_start:batch_idx_end] = predicted_classes
                    targets_phase[batch_idx_start:batch_idx_end] = target_classes
                    correct_phase[batch_idx_start:batch_idx_end] = correct_classes
                    running_loss += loss.item()
                    epoch_loss += loss.item()
                    # Report fraction of clean data in mini batch
                    clean_num = float((targets == 0).sum())
                    debris_num = float((targets == 1).sum())
                    fraction_clean = clean_num / (debris_num + clean_num)
                    writer.add_scalars('Fraction_clean_samples', {phase: fraction_clean}, it)
                    if i % self.log_int == 0:
                        running_loss_log = float(running_loss) / batch_idx_end
                        running_accuracy_log = float(correct_sum) / batch_idx_end
                        print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ')' + ' Phase: ' + phase +
                              ', epoch: {}, batch: {}, running loss: {:0.4f}, running accuracy: {:0.3f} '.
                              format(epoch, i, running_loss_log, running_accuracy_log))
                        writer.add_scalars('running_loss', {phase: running_loss_log}, it)
                        writer.add_scalars('running_accuracy', {phase: running_accuracy_log}, it)
                epoch_loss_log = float(epoch_loss) / num_items
                epoch_accuracy_log = float(correct_sum) / num_items
                print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ')' + ' Phase: ' + phase +
                      ', epoch: {}: epoch loss: {:0.4f}, epoch accuracy: {:0.3f} '.
                      format(epoch, epoch_loss_log, epoch_accuracy_log))
                metrics = Metrics(
                    targets=targets_phase, outputs=outputs_phase, output_prob_fn=lambda x: np.exp(x[:, 1]),
                    sample_ind=sample_ind_phase)
                metrics.confusion_table(
                    path_out=os.path.join(self.log_root, epoch_root, 'confusion_table_' + phase + '.csv'))
                metrics.prediction_table(
                    path_out=os.path.join(self.log_root, epoch_root, 'prediction_table_' + phase + '.csv'))
                # Set the current values of the epoch error metrics
                cur_metrics = [epoch_loss_log, epoch_accuracy_log, metrics.metrics['PPV'], metrics.metrics['TPR']]
                for i, metric_name in enumerate(epoch_metric_names):
                    epoch_metric_dict[metric_name][phase] = cur_metrics[i]
                fig = Trainer.show_imgs(inputs=inputs_phase, outputs=outputs_phase, predictions=predictions_phase,
                                        targets=targets_phase,
                                        sample_ind=sample_ind_phase)
                figname = 'image_examples_'
                fig.savefig(os.path.join(self.log_root, epoch_root, figname + '_' + phase + '.png'))
                writer.add_figure(figname + phase, fig, epoch)
                fig = Trainer.show_classification_matrix(targets=targets_phase, predictions=predictions_phase,
                                                         metrics=metrics.metrics)
                figname = 'targets_outputs_correct_'
                fig.savefig(os.path.join(self.log_root, epoch_root, figname + '_' + phase + '.png'))
                fig.savefig(os.path.join(self.log_root, epoch_root, figname + '_' + phase + '.eps'))
                writer.add_figure(figname + phase, fig, epoch)
                writer.add_pr_curve(
                    'pr_curve_'+phase, labels=targets_phase, predictions=np.exp(outputs_phase[:, 1]), global_step=epoch,
                    num_thresholds=50)
                if self.save & (phase == 'train') & (epoch % self.save_int == 0):
                    print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ') Writing model graph ... ')
                    # writer.add_graph(self.model, inputs)
                    print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ') Saving model state... ')
                    # persist the counters inside the model so resume restores them
                    self.model.epoch = torch.nn.Parameter(torch.tensor(epoch), requires_grad=False)
                    self.model.iteration = torch.nn.Parameter(torch.tensor(it), requires_grad=False)
                    torch.save({
                        'model_state_dict': self.model.state_dict(),
                    }, os.path.join(self.log_root, epoch_root, 'model_state_dict'))
                    torch.save({
                        'optimizer_state_dict': self.optimizer.state_dict()
                    }, os.path.join(self.log_root, 'optimizer_state_dict'))
            # write the epoch related metrics to the tensorboard
            for metric_name in epoch_metric_names:
                writer.add_scalars(metric_name, epoch_metric_dict[metric_name], epoch)
        print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ') Finished training ... ')
        writer.close()
        print('(' + datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S") + ') Closed writer ... ')

    @staticmethod
    def copy2cpu(*tensors):
        """Return the given tensors, moving any CUDA tensor to the CPU.

        Variadic on purpose: ``train`` passes three tensors while
        ``show_img`` passes two (the original fixed 3-argument signature
        made the ``show_img`` call a TypeError).
        """
        return tuple(t.cpu() if t.is_cuda else t for t in tensors)

    @staticmethod
    def n1hw_to_n3hw(data):
        """Repeat a single-channel NCHW tensor to three channels (on CPU)."""
        return data.cpu().repeat(1, 3, 1, 1)

    @staticmethod
    def show_img(inputs, outputs, idx):
        """Plot input and output images of sample ``idx`` side by side."""
        inputs, outputs = Trainer.copy2cpu(inputs, outputs)
        fig, axs = plt.subplots(1, 2, figsize=(4, 3))
        axs[0].imshow(inputs[idx].data.numpy().squeeze(), cmap='gray')
        axs[1].imshow(outputs[idx].data.numpy().squeeze(), cmap='gray')
        return fig

    @staticmethod
    def show_imgs(inputs, outputs, predictions, targets, sample_ind, class_idx=1, plot_ind=None):
        """Plot example inputs (top row) with output probability, prediction
        and target bars plus text annotations (bottom row)."""
        if plot_ind is None:
            plot_ind = list(range(5))
        fig, axs = plt.subplots(2, len(plot_ind), figsize=(3 * len(plot_ind), 6))
        for i, sample_idx in enumerate(np.asarray(sample_ind)[plot_ind]):
            input_img = inputs[i, 0, :, :].squeeze()
            axs[0, i].imshow(input_img, cmap='gray')
            axs[0, i].axis('off')
            # three horizontal bars encoding probability / prediction / target
            output = np.tile(np.exp((outputs[i][class_idx])), (int(input_img.shape[0]/3), int(input_img.shape[1])))
            prediction = np.tile(int(predictions[i]), (int(input_img.shape[0]/3), int(input_img.shape[1])))
            target = np.tile(int(targets[i]), (int(input_img.shape[0]/3), int(input_img.shape[1])))
            fused = np.concatenate((output, prediction, target), axis=0)
            axs[1, i].imshow(fused, cmap='gray', vmin=0, vmax=1)
            axs[1, i].text(0.5, 0.875, 'sample_idx: {}'.format(sample_idx),
                           transform=axs[1, i].transAxes, ha='center', va='center', c=[0.8, 0.8, 0.2])
            axs[1, i].text(0.5, 0.75, 'output (class {:d}): {:01.2f}'.format(class_idx, np.exp((outputs[i][class_idx]))),
                           transform=axs[1, i].transAxes, ha='center', va='center', c=[0.8, 0.8, 0.2])
            axs[1, i].text(0.5, 0.5, 'prediction class: {:d}'.format(int(predictions[i])),
                           transform=axs[1, i].transAxes, ha='center', va='center', c=[0.5, 0.5, 0.5])
            axs[1, i].text(0.5, 0.2, 'target class: {:d}'.format(int(targets[i])),
                           transform=axs[1, i].transAxes, ha='center', va='center', c=[0.5, 0.5, 0.5])
            axs[1, i].axis('off')
            axs[0, i].set_ylabel('sample_idx: {}'.format(sample_idx))
        plt.tight_layout()
        return fig

    @staticmethod
    def show_classification_matrix(targets, predictions, metrics):
        """Plot targets/predictions/accuracy/confusion rows as a color-coded
        matrix with a legend summarizing the confusion counts and PPV/TPR."""
        targets_pr = targets.copy().astype(int)
        predictions_pr = predictions.copy().astype(int)
        correct_pr = ((targets_pr == 0) == (predictions_pr == 0)) | ((targets_pr == 1) == (predictions_pr == 1)) + 2
        code_pr = targets_pr.copy()
        code_pr[metrics['TN_idx']] = 4
        code_pr[metrics['TP_idx']] = 5
        code_pr[predictions_pr > targets_pr] = 6
        code_pr[predictions_pr < targets_pr] = 7
        mat = np.stack((targets_pr, predictions_pr, correct_pr, code_pr), axis=0)
        colors = [[0.0, 0.0, 0.0, 1],
                  [1.0, 1.0, 1.0, 1],
                  [1.0, 0.0, 0.0, 1],
                  [0.0, 1.0, 0.0, 1],
                  [0.4, 0.1, 0.9, 1],
                  [0.3, 0.5, 0.9, 1],
                  [0.9, 0.4, 0.1, 1],
                  [0.9, 0.1, 0.5, 1]]
        cmap = ListedColormap(colors=colors)
        fig_width_mult = min(max([0.5, len(targets)/2000]), 3)
        fig, axs = plt.subplots(figsize=(12*fig_width_mult, 6))
        axs.matshow(mat, cmap=cmap, vmin=0, vmax=7)
        axs.set_yticks([0, 1, 2, 3])
        axs.set_yticklabels(['targets', 'outputs', 'accuracy', 'confusion'])
        axs.set_aspect(10)
        bbox = axs.get_position().bounds
        axs2 = plt.axes((bbox[0], 0.1, bbox[2], 0.2), sharex=axs)
        axs2.text(0.010, 1.00, 'target|output', c=(0.2, 0.2, 0.2), weight='bold', transform=axs2.transAxes)
        axs2.text(0.017, 0.75, 'artifact: {}'.format(metrics['P']), c=(0.2, 0.2, 0.2), backgroundcolor=colors[1], transform=axs2.transAxes)
        axs2.text(0.017, 0.50, 'no artifact: {}'.format(metrics['N']), c=(0.8, 0.8, 0.8), backgroundcolor=colors[0], transform=axs2.transAxes)
        axs2.text(0.27, 1.00, 'accuracy', c=(0.2, 0.2, 0.2), weight='bold', transform=axs2.transAxes)
        axs2.text(0.20, 0.75, 'frac correct: {:03d}/{:03d}={:01.2f}'.format(metrics['TP'] +
            metrics['TN'], len(targets), (metrics['TP'] +
            metrics['TN'])/len(targets)), c=(0.2, 0.2, 0.2), backgroundcolor=colors[3],
            transform=axs2.transAxes)
        axs2.text(0.20, 0.50, 'frac incorrect: {:03d}/{:03d}={:01.2f}'.format(metrics['FP'] +
            metrics['FN'], len(targets), (metrics['FP'] +
            metrics['FN'])/len(targets)), c=(0.2, 0.2, 0.2), backgroundcolor=colors[2],
            transform=axs2.transAxes)
        axs2.text(0.60, 1.00, 'confusion', c=(0.2, 0.2, 0.2), weight='bold', transform=axs2.transAxes)
        axs2.text(0.50, 0.75, 'TP: {:01.0f}'.format(metrics['TP']).ljust(12), c=(0.8, 0.8, 0.8),
                  backgroundcolor=colors[5], transform=axs2.transAxes)
        axs2.text(0.60, 0.75, 'FP: {:01.0f}'.format(metrics['FP']).ljust(12), c=(0.2, 0.2, 0.2),
                  backgroundcolor=colors[6], transform=axs2.transAxes)
        axs2.text(0.50, 0.50, 'FN: {:01.0f}'.format(metrics['FN']).ljust(12), c=(0.2, 0.2, 0.2),
                  backgroundcolor=colors[7], transform=axs2.transAxes)
        axs2.text(0.60, 0.50, 'TN: {:01.0f}'.format(metrics['TN']).ljust(12), c=(0.8, 0.8, 0.8),
                  backgroundcolor=colors[4], transform=axs2.transAxes)
        axs2.text(0.70, 0.75, 'Precision: {:01.2f}'.format(metrics['PPV']).ljust(20), c=(0.2, 0.2, 0.2),
                  backgroundcolor=(0.7, 0.7, 0.7), transform=axs2.transAxes)
        axs2.text(0.70, 0.50, 'Recall: {:01.2f}'.format(metrics['TPR']).ljust(20), c=(0.2, 0.2, 0.2),
                  backgroundcolor=(0.7, 0.7, 0.7), transform=axs2.transAxes)
        axs2.axis('off')
        return fig
| [
"torch.utils.tensorboard.SummaryWriter",
"os.path.exists",
"numpy.asarray",
"matplotlib.colors.ListedColormap",
"numpy.exp",
"numpy.stack",
"numpy.concatenate",
"torch.cuda.current_device",
"numpy.ones",
"genEM3.util.gpu.get_gpu",
"matplotlib.pyplot.axes",
"torch.device",
"os.makedirs",
"o... | [((1560, 1579), 'torch.device', 'torchDevice', (['device'], {}), '(device)\n', (1571, 1579), True, 'from torch import device as torchDevice\n'), ((1604, 1644), 'os.path.join', 'os.path.join', (['run_root', '""".log"""', 'run_name'], {}), "(run_root, '.log', run_name)\n", (1616, 1644), False, 'import os\n'), ((2606, 2634), 'torch.utils.tensorboard.SummaryWriter', 'SummaryWriter', (['self.log_root'], {}), '(self.log_root)\n', (2619, 2634), False, 'from torch.utils.tensorboard import SummaryWriter\n'), ((12255, 12289), 'matplotlib.pyplot.subplots', 'plt.subplots', (['(1)', '(2)'], {'figsize': '(4, 3)'}), '(1, 2, figsize=(4, 3))\n', (12267, 12289), True, 'import matplotlib.pyplot as plt\n'), ((14267, 14285), 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), '()\n', (14283, 14285), True, 'import matplotlib.pyplot as plt\n'), ((14841, 14908), 'numpy.stack', 'np.stack', (['(targets_pr, predictions_pr, correct_pr, code_pr)'], {'axis': '(0)'}), '((targets_pr, predictions_pr, correct_pr, code_pr), axis=0)\n', (14849, 14908), True, 'import numpy as np\n'), ((15229, 15258), 'matplotlib.colors.ListedColormap', 'ListedColormap', ([], {'colors': 'colors'}), '(colors=colors)\n', (15243, 15258), False, 'from matplotlib.colors import ListedColormap\n'), ((15341, 15387), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(12 * fig_width_mult, 6)'}), '(figsize=(12 * fig_width_mult, 6))\n', (15353, 15387), True, 'import matplotlib.pyplot as plt\n'), ((15635, 15685), 'matplotlib.pyplot.axes', 'plt.axes', (['(bbox[0], 0.1, bbox[2], 0.2)'], {'sharex': 'axs'}), '((bbox[0], 0.1, bbox[2], 0.2), sharex=axs)\n', (15643, 15685), True, 'import matplotlib.pyplot as plt\n'), ((1446, 1465), 'genEM3.util.gpu.get_gpu', 'gpu.get_gpu', (['gpu_id'], {}), '(gpu_id)\n', (1457, 1465), False, 'from genEM3.util import gpu\n'), ((13246, 13298), 'numpy.concatenate', 'np.concatenate', (['(output, prediction, target)'], {'axis': '(0)'}), '((output, prediction, target), 
axis=0)\n', (13260, 13298), True, 'import numpy as np\n'), ((1500, 1527), 'torch.cuda.current_device', 'torch.cuda.current_device', ([], {}), '()\n', (1525, 1527), False, 'import torch\n'), ((2007, 2036), 'os.path.exists', 'os.path.exists', (['self.log_root'], {}), '(self.log_root)\n', (2021, 2036), False, 'import os\n'), ((2054, 2080), 'os.makedirs', 'os.makedirs', (['self.log_root'], {}), '(self.log_root)\n', (2065, 2080), False, 'import os\n'), ((2272, 2314), 'os.path.join', 'os.path.join', (['self.log_root', '"""torch_model"""'], {}), "(self.log_root, 'torch_model')\n", (2284, 2314), False, 'import os\n'), ((12759, 12781), 'numpy.asarray', 'np.asarray', (['sample_ind'], {}), '(sample_ind)\n', (12769, 12781), True, 'import numpy as np\n'), ((12955, 12984), 'numpy.exp', 'np.exp', (['outputs[i][class_idx]'], {}), '(outputs[i][class_idx])\n', (12961, 12984), True, 'import numpy as np\n'), ((3899, 3938), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root'], {}), '(self.log_root, epoch_root)\n', (3911, 3938), False, 'import os\n'), ((3969, 4008), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root'], {}), '(self.log_root, epoch_root)\n', (3981, 4008), False, 'import os\n'), ((5929, 5954), 'numpy.sum', 'np.sum', (['predicted_classes'], {}), '(predicted_classes)\n', (5935, 5954), True, 'import numpy as np\n'), ((6051, 6073), 'numpy.sum', 'np.sum', (['target_classes'], {}), '(target_classes)\n', (6057, 6073), True, 'import numpy as np\n'), ((6183, 6206), 'numpy.sum', 'np.sum', (['correct_classes'], {}), '(correct_classes)\n', (6189, 6206), True, 'import numpy as np\n'), ((9507, 9578), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root', "(figname + '_' + phase + '.png')"], {}), "(self.log_root, epoch_root, figname + '_' + phase + '.png')\n", (9519, 9578), False, 'import os\n'), ((9918, 9989), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root', "(figname + '_' + phase + '.png')"], {}), "(self.log_root, epoch_root, figname + 
'_' + phase + '.png')\n", (9930, 9989), False, 'import os\n'), ((10019, 10090), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root', "(figname + '_' + phase + '.eps')"], {}), "(self.log_root, epoch_root, figname + '_' + phase + '.eps')\n", (10031, 10090), False, 'import os\n'), ((13631, 13660), 'numpy.exp', 'np.exp', (['outputs[i][class_idx]'], {}), '(outputs[i][class_idx])\n', (13637, 13660), True, 'import numpy as np\n'), ((8632, 8708), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root', "('confusion_table_' + phase + '.csv')"], {}), "(self.log_root, epoch_root, 'confusion_table_' + phase + '.csv')\n", (8644, 8708), False, 'import os\n'), ((8781, 8858), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root', "('prediction_table_' + phase + '.csv')"], {}), "(self.log_root, epoch_root, 'prediction_table_' + phase + '.csv')\n", (8793, 8858), False, 'import os\n'), ((10266, 10293), 'numpy.exp', 'np.exp', (['outputs_phase[:, 1]'], {}), '(outputs_phase[:, 1])\n', (10272, 10293), True, 'import numpy as np\n'), ((10788, 10807), 'torch.tensor', 'torch.tensor', (['epoch'], {}), '(epoch)\n', (10800, 10807), False, 'import torch\n'), ((10892, 10908), 'torch.tensor', 'torch.tensor', (['it'], {}), '(it)\n', (10904, 10908), False, 'import torch\n'), ((11056, 11115), 'os.path.join', 'os.path.join', (['self.log_root', 'epoch_root', '"""model_state_dict"""'], {}), "(self.log_root, epoch_root, 'model_state_dict')\n", (11068, 11115), False, 'import os\n'), ((11249, 11300), 'os.path.join', 'os.path.join', (['self.log_root', '"""optimizer_state_dict"""'], {}), "(self.log_root, 'optimizer_state_dict')\n", (11261, 11300), False, 'import os\n'), ((4521, 4554), 'numpy.ones', 'np.ones', (['(num_items, 1, 140, 140)'], {}), '((num_items, 1, 140, 140))\n', (4528, 4554), True, 'import numpy as np\n'), ((4602, 4656), 'numpy.ones', 'np.ones', (['(num_items, self.model.classifier.num_output)'], {}), '((num_items, self.model.classifier.num_output))\n', (4609, 
4656), True, 'import numpy as np\n'), ((4708, 4726), 'numpy.ones', 'np.ones', (['num_items'], {}), '(num_items)\n', (4715, 4726), True, 'import numpy as np\n'), ((4772, 4790), 'numpy.ones', 'np.ones', (['num_items'], {}), '(num_items)\n', (4779, 4790), True, 'import numpy as np\n'), ((4836, 4854), 'numpy.ones', 'np.ones', (['num_items'], {}), '(num_items)\n', (4843, 4854), True, 'import numpy as np\n'), ((8496, 8511), 'numpy.exp', 'np.exp', (['x[:, 1]'], {}), '(x[:, 1])\n', (8502, 8511), True, 'import numpy as np\n'), ((11525, 11548), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11546, 11548), False, 'import datetime\n'), ((11653, 11676), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (11674, 11676), False, 'import datetime\n'), ((2152, 2175), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2173, 2175), False, 'import datetime\n'), ((2504, 2527), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (2525, 2527), False, 'import datetime\n'), ((10468, 10491), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10489, 10491), False, 'import datetime\n'), ((10646, 10669), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (10667, 10669), False, 'import datetime\n'), ((8129, 8152), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (8150, 8152), False, 'import datetime\n'), ((7503, 7526), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (7524, 7526), False, 'import datetime\n')] |
# imports
from numpy import zeros
from numpy.random import randint,seed
from tensorflow.keras.utils import to_categorical
from utils import configs
# end imports
seed(42)
'''
-------------------------------------------------------------------------------------------
DataHandler : this class handles everything related to data shaping and tensor
              creation. The returned data is used for training and is consumed
              by the golois.cpp utility functions (getBatch and getValidation).
-------------------------------------------------------------------------------------------
'''
class DataHandler(object):
    """Produce randomly generated tensors shaped like Go training data.

    The returned arrays feed the training loop and the golois.cpp utility
    functions (getBatch and getValidation).
    """

    def __init__(self, n_samples=configs.n_samples, dim=configs.dim,
                 n_moves=configs.n_moves, n_planes=configs.n_planes) -> None:
        super().__init__()
        self.n_samples = n_samples
        self.dim = dim
        self.n_moves = n_moves
        self.n_planes = n_planes

    def get_data(self):
        """Return (input_data, policy, value, end, groups) random tensors."""
        count, board = self.n_samples, self.dim
        # Binary input feature planes for the network.
        input_data = randint(
            2, size=(count, board, board, self.n_planes)).astype('float32')
        # One-hot policy targets and binary value targets.
        policy = to_categorical(randint(self.n_moves, size=(count,)))
        value = randint(2, size=(count,)).astype('float32')
        # Auxiliary planes consumed by getBatch / getValidation.
        end = randint(2, size=(count, board, board, 2)).astype('float32')
        groups = zeros((count, board, board, 1)).astype('float32')
        return input_data, policy, value, end, groups
"numpy.random.randint",
"numpy.random.seed",
"numpy.zeros"
] | [((163, 171), 'numpy.random.seed', 'seed', (['(42)'], {}), '(42)\n', (167, 171), False, 'from numpy.random import randint, seed\n'), ((1168, 1213), 'numpy.random.randint', 'randint', (['self.n_moves'], {'size': '(self.n_samples,)'}), '(self.n_moves, size=(self.n_samples,))\n', (1175, 1213), False, 'from numpy.random import randint, seed\n'), ((1028, 1096), 'numpy.random.randint', 'randint', (['(2)'], {'size': '(self.n_samples, self.dim, self.dim, self.n_planes)'}), '(2, size=(self.n_samples, self.dim, self.dim, self.n_planes))\n', (1035, 1096), False, 'from numpy.random import randint, seed\n'), ((1232, 1266), 'numpy.random.randint', 'randint', (['(2)'], {'size': '(self.n_samples,)'}), '(2, size=(self.n_samples,))\n', (1239, 1266), False, 'from numpy.random import randint, seed\n'), ((1342, 1398), 'numpy.random.randint', 'randint', (['(2)'], {'size': '(self.n_samples, self.dim, self.dim, 2)'}), '(2, size=(self.n_samples, self.dim, self.dim, 2))\n', (1349, 1398), False, 'from numpy.random import randint, seed\n'), ((1435, 1481), 'numpy.zeros', 'zeros', (['(self.n_samples, self.dim, self.dim, 1)'], {}), '((self.n_samples, self.dim, self.dim, 1))\n', (1440, 1481), False, 'from numpy import zeros\n')] |
from __future__ import print_function
__author__ = 'rogerjiang'
"""
Purposes:
1. Visualization of training data
2. Evaluation of training data augmentation
Notes on the data files:
train_wkt_v4.csv: training labels with ImageId, ClassType, MultipolygonWKT
train_geoson_v3 (similar to train_wkt_v4.csv): training labels with ImageId
(folder name), ClassType (detailed, name of .geojson files), Multipolygon
(data of .geojson files, also contains detailed ClassType information)
grid_size.csv: sizes of all images with ImageId, 0<Xmax<1, -1<Ymin<0
(size of images, assuming origin (0,0) is at the upper left corner)
three_band: all 3-band images, in name of ImageId.tif
sixteen_band: all 16-band images, in name of ImageId_{A,M,P}.tif
sample_submission.csv: submission with ImageId, ClassType, MultipolygonWKT
If the order of dimension in all the image data is x-y, this order is switched
to y-x in grid_sizes and wkt data from train_wkt_v4.
-------------
Basically, the combination of ClassType and MultipolygonWKT gives the voxel-wise
class labels.
The 'three_band' and 'sixteen_band' folders are the input for training.
ImageId connects the class labels with the training data.
MultipolygonWKT is relative position in the figure and can be converted to pixel
coordinate with the grid_size (Xmax, Ymin)
There is a slight mismatch between the three_band and sixteen_band data due to
a delay between measurements, so the two sets should be aligned.
"""
import tifffile
import shapely.wkt as wkt
import pandas as pd
import cv2
import numpy as np
import matplotlib.pyplot as plt
from descartes.patch import PolygonPatch
from matplotlib.patches import Patch
import random
from matplotlib import cm
from shapely import affinity
from shapely.affinity import scale
from shapely.geometry import MultiPolygon, Polygon
from collections import defaultdict
import sys
import seaborn as sns
import os
# Project root: one directory above the folder containing this file.
data_dir = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..')
# Class ids (1-10) of the DSTL challenge mapped to human-readable names.
CLASSES = {
    1: 'Buildings',
    2: 'Struct',
    3: 'Road',
    4: 'Track',
    5: 'Trees',
    6: 'Crops',
    7: 'Waterway',
    8: 'Standing water',
    9: 'Vehicle Large',
    10: 'Vehicle Small'
}
# Plot color per class: matplotlib grey-level strings or hex codes.
COLORS = {
    1: '0.7',
    2: '0.4',
    3: '#b35806',
    4: '#dfc27d',
    5: '#1b7837',
    6: '#a6dba0',
    7: '#74add1',
    8: '#4575b4',
    9: '#f46d43',
    10: '#d73027',
}
# ZORDER defines the priority for plotting overlay of class labels.
# Higher values are drawn on top (small objects above large areas).
ZORDER = {
    1: 6,
    2: 5,
    3: 4,
    4: 1,
    5: 3,
    6: 2,
    7: 7,
    8: 8,
    9: 9,
    10: 10,
}
# train_wkt_v4.csv stores the polygon data for all images and classes. The polygons
# uses relative coordinate positions.
_df = pd.read_csv(data_dir + '/data/train_wkt_v4.csv',
                  names=['ImageId', 'ClassId', 'MultipolygonWKT'], skiprows=1)
# grid_sizes.csv stores the relative size of for each image. The origin is at the
# upper left corner, which means Xmax is positive and Ymin is negative.
_df1 = pd.read_csv(data_dir + '/data/grid_sizes.csv',
                   names=['ImageId', 'Xmax', 'Ymin'], skiprows=1)
# sample_submission.csv is the file for submission
_df2 = pd.read_csv(data_dir + '/data/sample_submission.csv',
                   names=['ImageId', 'ClassId', 'MultipolygonWKT'], skiprows=1)
# Two of the training images were photoed at the same spot at different times,
# under different weather condition. It's up to you to decide whether to
# exclude the duplicates ('6110_1_2', '6110_3_1'). Here I exclude none of them.
duplicates = []
train_wkt_v4 = _df[np.invert(np.in1d(_df.ImageId, duplicates))]
grid_sizes = _df1[np.invert(np.in1d(_df1.ImageId, duplicates))]
test_wkt = _df2
# Sorted ImageId strings for the train/test splits.
all_train_names = sorted(train_wkt_v4.ImageId.unique())
all_test_names = sorted(test_wkt.ImageId.unique())
# Lookup tables: numeric index <-> ImageId string (both directions).
train_IDs_dict = dict(zip(np.arange(len(all_train_names)), all_train_names))
train_IDs_dict_r = dict(zip(all_train_names, np.arange(len(all_train_names))))
test_IDs_dict = dict(zip(np.arange(len(all_test_names)), all_test_names))
test_IDs_dict_r = dict(zip(all_test_names, np.arange(len(all_test_names))))
def resize(im, shape_out):
    """Resample *im* to shape_out (rows, cols) with bicubic interpolation.

    cv2.resize takes its target size as (width, height), the reverse of
    numpy's (rows, cols) convention, hence the swap below.
    :param im: input image array.
    :param shape_out: desired (rows, cols) of the output.
    :return: the resized image.
    """
    target_wh = (shape_out[1], shape_out[0])
    return cv2.resize(im, target_wh, interpolation=cv2.INTER_CUBIC)
def affine_transform(img, warp_matrix, out_shape):
    """Warp *img* by the inverse of warp_matrix into out_shape (rows, cols).

    Uses linear interpolation; border pixels are replicated so the warped
    result carries no black margin.
    :param img: input image array.
    :param warp_matrix: 2x3 affine matrix (applied inverted).
    :param out_shape: (rows, cols) of the output image.
    :return: the warped image.
    """
    size_wh = (out_shape[1], out_shape[0])
    warped = cv2.warpAffine(img, warp_matrix, size_wh,
                            flags=cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP,
                            borderMode=cv2.BORDER_REPLICATE)
    return warped
def get_polygon_list(image_id, class_type):
    """Return the shapely multipolygon (relative coords) for one image/class.

    An empty class is stored as 'MULTIPOLYGON EMPTY' in the csv; wkt.loads
    converts it into an empty — but never None — multipolygon, so callers
    may safely take len() of the result.
    :param image_id: ImageId string.
    :param class_type: integer class id (see CLASSES).
    :return: shapely multipolygon in relative coordinates.
    """
    rows = train_wkt_v4[train_wkt_v4.ImageId == image_id]
    wkt_series = rows[rows.ClassId == class_type].MultipolygonWKT
    return wkt.loads(wkt_series.values[0])
def convert_coordinate_to_raster(coords, img_size, xymax):
    """Scale relative polygon coordinates into integer pixel coordinates.

    Column 0 is scaled by (height + 1) / xmax and column 1 by
    (width + 1) / ymax; *coords* is modified in place before a rounded
    int32 copy is returned.
    :param coords: (N, 2) float array of relative coordinates (mutated).
    :param img_size: (width, height) of the image.
    :param xymax: (Xmax, Ymin) relative extent of the image.
    :return: (N, 2) int32 array of pixel coordinates.
    """
    x_max, y_max = xymax
    n_rows, n_cols = img_size
    coords[:, 0] *= (n_cols + 1) / x_max
    coords[:, 1] *= (n_rows + 1) / y_max
    return np.round(coords).astype(np.int32)
def generate_contours(polygon_list, img_size, xymax):
    """Convert a shapely multipolygon (relative coords) into raster-space
    contour lists.

    :param polygon_list: shapely multipolygon for one class.
    :param img_size: (width, height) of the image.
    :param xymax: (Xmax, Ymin) relative extent of the image.
    :return: (exterior_contours, interior_contours) — each a list of
        (N, 2) int32 arrays; both empty when polygon_list is empty.
    """
    if len(polygon_list) == 0:
        return [], []

    def as_float(ring):
        # Ring coordinates as an (N, 2) float32 array.
        return np.array(list(ring)).astype(np.float32)

    perim_list = []
    for poly in polygon_list:
        perim_list.append(convert_coordinate_to_raster(
            as_float(poly.exterior.coords), img_size, xymax))
    inter_list = []
    for poly_ex in polygon_list:
        for hole in poly_ex.interiors:
            inter_list.append(convert_coordinate_to_raster(
                as_float(hole.coords), img_size, xymax))
    return perim_list, inter_list
def generate_mask_from_contours(img_size, perim_list, inter_list, class_id = 1):
    """Rasterize polygon contours into a uint8 pixel mask.

    Exterior rings are filled with class_id, then interior rings (holes)
    are punched back out with 0.  cv2.fillPoly interprets contour points
    as (x, y), i.e. transposed relative to the mask's (row, col) axes,
    which matches how the contours were produced.
    :param img_size: (width, height) of the mask.
    :param perim_list: list of exterior contours (or None for empty mask).
    :param inter_list: list of interior (hole) contours.
    :param class_id: value written into filled pixels.
    :return: uint8 mask of shape img_size.
    """
    out = np.zeros(img_size, np.uint8)
    if perim_list is None:
        return out
    cv2.fillPoly(out, perim_list, class_id)
    cv2.fillPoly(out, inter_list, 0)
    return out
def plot_polygon(polygon_list, ax, scaler=None, alpha=0.7):
    """Draw every class's polygons onto *ax* and return legend handles.

    :param polygon_list: dict mapping class id -> shapely multipolygon in
        relative coordinates.
    :param ax: matplotlib axes to draw on.
    :param scaler: optional (y, x) factors converting relative to raster
        coordinates before drawing.
    :param alpha: patch transparency.
    :return: list of matplotlib Patch handles, one per class, labelled
        with the class name and polygon count.
    """
    legend_list = []
    for cl in CLASSES:
        label_text = '{}: ({})'.format(CLASSES[cl], len(polygon_list[cl]))
        legend_list.append(Patch(color=COLORS[cl], label=label_text))
        for polygon in polygon_list[cl]:
            if scaler is None:
                shape = polygon
            else:
                # Rescale the relative-coordinate polygon into raster space.
                shape = affinity.scale(polygon, xfact=scaler[1],
                                       yfact=scaler[0], origin=[0., 0., 0.])
            # ZORDER keeps small objects drawn on top of large areas.
            ax.add_patch(PolygonPatch(polygon=shape, color=COLORS[cl],
                                      lw=0, alpha=alpha, zorder=ZORDER[cl]))
    return legend_list
def plot_image(img, ax, image_id, image_key, selected_channel=None):
    """Show *img* (optionally a channel subset) on *ax* without axis ticks.

    :param img: image array.
    :param ax: matplotlib axes.
    :param image_id: ImageId string (kept for interface compatibility;
        titles are currently disabled).
    :param image_key: band key string (same note as image_id).
    :param selected_channel: optional list of channel indices to display.
    """
    if selected_channel is not None:
        img = img[:, :, selected_channel]
    ax.imshow(img)
    ax.set_xticks([])
    ax.set_yticks([])
def plot_overlay(img, ax, image_id, image_key, polygon_list, scaler=(1., 1.),
                 x_range=None, y_range=None, label=None, alpha=1.0,
                 rgb=False):
    """Show *img* with class-polygon overlays and return legend handles.

    When rgb is False the image is collapsed to a contrast-stretched grey
    background so the colored polygon patches stand out.
    :param img: image array.
    :param ax: matplotlib axes to draw on.
    :param image_id: ImageId string (titles currently disabled).
    :param image_key: band key string (titles currently disabled).
    :param polygon_list: dict class id -> shapely multipolygon.
    :param scaler: (y, x) factors converting relative to raster coords.
    :param x_range / y_range: display ranges, defaulting to the full image.
    :param label: unused, kept for interface compatibility.
    :param alpha: overlay transparency.
    :param rgb: show the image in color instead of greyscale.
    :return: legend handles from plot_polygon.
    """
    if not x_range:
        x_range = [0, img.shape[0]]
    if not y_range:
        y_range = [0, img.shape[1]]
    if rgb:
        ax.imshow(img, vmax=1., vmin=0.)
    else:
        grey = scale_percentile(rgb2gray(img))
        ax.imshow(grey, cmap=cm.gray, vmax=1., vmin=0.)
    handles = plot_polygon(polygon_list, ax, scaler, alpha=alpha)
    ax.set_xticks([])
    ax.set_yticks([])
    return handles
def scale_percentile(img):
    """Linearly stretch *img* so its 1st..99th percentiles map to 0..1.

    For a 3-D (multi-band) image the stretch is computed independently per
    channel; the result is clipped into [0, 1] and reshaped back to the
    input shape.
    :param img: 2-D or 3-D image array.
    :return: float array of the same shape, scaled for display.
    """
    orig_shape = img.shape
    if len(orig_shape) == 3:
        flat = np.reshape(img, [orig_shape[0] * orig_shape[1],
                                 orig_shape[2]]).astype(np.float32)
    elif len(orig_shape) == 2:
        flat = np.reshape(img, [orig_shape[0] * orig_shape[1]]).astype(np.float32)
    else:
        flat = img
    mins = np.percentile(flat, 1, axis=0)
    maxs = np.percentile(flat, 99, axis=0) - mins
    flat = (flat - mins) / maxs
    flat.clip(0, 1, out=flat)
    return np.reshape(flat, orig_shape)
def rgb2gray(rgb):
    """Collapse an RGB image to single-channel luminance.

    Uses the standard ITU-R BT.601 luma weights
    0.299 R + 0.587 G + 0.114 B.  The previous blue weight of 0.144 was a
    typo — the three weights must sum to 1.
    :param rgb: array of shape (..., >=3); only the first three channels
        are used.
    :return: array of shape (...) holding the weighted channel sum.
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def crop(img, crop_coord):
    """Cut the axis-aligned patch described by crop_coord out of *img*.

    :param img: array of shape (width, height, ...).
    :param crop_coord: pair of (low, high) index pairs, one per axis;
        values are cast to integers.
    :return: the view img[x0:x1, y0:y1].
    :raises AssertionError: if the window falls outside the image.
    """
    width, height = img.shape[0], img.shape[1]
    # np.int was removed in NumPy 1.24; the builtin int is the correct cast.
    # np.asarray also lets plain sequences be passed, not only ndarrays.
    x_lim = np.asarray(crop_coord[0]).astype(int)
    y_lim = np.asarray(crop_coord[1]).astype(int)
    assert 0 <= x_lim[0] < x_lim[1] <= width
    assert 0 <= y_lim[0] < y_lim[1] <= height
    return img[x_lim[0]: x_lim[1], y_lim[0]: y_lim[1]]
def get_image_area(image_id):
    """Return the relative area |Xmax * Ymin| of one image.

    Xmax is positive and Ymin negative (origin at the upper-left corner),
    so the absolute value of their product is the image's extent.
    :param image_id: ImageId string looked up in grid_sizes.
    :return: the (positive) relative area.
    """
    row = grid_sizes[grid_sizes.ImageId == image_id]
    return abs(row.Xmax.values[0] * row.Ymin.values[0])
def image_stat(image_id):
    """Per-class polygon statistics of one training image as a DataFrame.

    Counts are absolute; total/mean/std areas are percentages of the image
    area.  Area columns stay NaN for classes without polygons (the dicts
    are simply left without those keys).
    :param image_id: ImageId string.
    :return: pd.DataFrame with columns Class, Counts, TotalArea, MeanArea,
        STDArea indexed by class id.
    """
    counts, total_area, mean_area, std_area = {}, {}, {}, {}
    img_area = get_image_area(image_id)
    for cl in CLASSES:
        polygons = get_polygon_list(image_id, cl)
        counts[cl] = len(polygons)
        if counts[cl] > 0:
            areas = [poly.area for poly in polygons]
            total_area[cl] = np.sum(areas) / img_area * 100.
            mean_area[cl] = np.mean(areas) / img_area * 100.
            std_area[cl] = np.std(areas) / img_area * 100.
    return pd.DataFrame({'Class': CLASSES, 'Counts': counts,
                         'TotalArea': total_area, 'MeanArea': mean_area,
                         'STDArea': std_area})
def collect_stats():
    """Gather image_stat frames for every training image.

    A simple text progress bar is written to stdout while iterating.
    :return: single pd.DataFrame concatenating all per-image stats, with
        an added ImageId column.
    """
    frames = []
    last = len(all_train_names) - 1
    for idx, image_id in enumerate(all_train_names):
        frame = image_stat(image_id)
        frame['ImageId'] = image_id
        frames.append(frame)
        sys.stdout.write('\rCollecting class stats [{}{}] {}%'.format(
            '=' * idx, ' ' * (last - idx), 100 * idx / last))
        sys.stdout.flush()
    sys.stdout.write('\n')
    return pd.concat(frames)
def calculate_class_weights():
    """Class-wise ratio of labelled to unlabelled area across all images.

    Each class's total labelled percentage (out of 25 images x 100% =
    2500) is divided by the remaining unlabelled percentage.
    :return: dict mapping class name to true-area / false-area ratio.
    """
    stats = collect_stats().fillna(0)
    per_class = stats.pivot(index='Class', columns='ImageId',
                            values='TotalArea').sum(axis=1)
    weights = per_class / (2500. - per_class)
    return weights.to_dict()
def plot_stats(value, title):
    """
    Plot 2D grid plot of statistics of MeanArea, Counts, TotalArea, STDArea.
    Rows are the 10 class types, columns the 25 training images; missing
    entries are shown as 0.
    :param value: column of the stats frame to plot ('MeanArea', 'Counts',
        'TotalArea' or 'STDArea').
    :param title: title string for the figure.
    :return:
    """
    stats = collect_stats()
    pvt = stats.pivot(index='Class', columns='ImageId', values = value)
    pvt.fillna(0., inplace = True)
    fig, ax = plt.subplots(figsize = (10, 4))
    # extent maps the heat map onto a 25 x 10 grid with origin at top-left.
    im = ax.imshow(pvt, interpolation = 'nearest', cmap = plt.cm.plasma,
                   extent = [0 ,25, 10, 0])
    ax.set_xlabel('Image')
    ax.set_ylabel('Class Type')
    # Ticks at cell centers (offset by 0.5).
    ax.set_xticks(np.arange(0.5, 25.4, 1))
    ax.set_yticks(np.arange(0.5, 10.4, 1))
    ax.set_xticklabels(np.arange(1, 26))
    ax.set_yticklabels(pvt.index)
    ax.set_title(title)
    fig.colorbar(im)
def plot_bar_stats():
    """Stacked bar chart of per-image total labelled area for every class.

    Cumulative sums are drawn from the largest stack downwards, so each
    class's bar remains visible in front of the ones drawn before it.
    """
    stats = collect_stats()
    pvt = stats.pivot(index='Class', columns='ImageId', values='TotalArea')
    perc_area = np.cumsum(pvt, axis=0)
    class_r = {}
    sns.set_style('white')
    sns.set_context({'figure.figsize': (12, 8)})
    for cl in CLASSES:
        class_r[CLASSES[cl]] = cl
    for offset in np.arange(1, 11):
        # Walk the cumulative rows from last to first.
        class_name = perc_area.index[-offset]
        class_id = class_r[class_name]
        ax = sns.barplot(x=perc_area.columns, y=perc_area.loc[class_name],
                         color=COLORS[class_id], label=class_name)
    ax.legend(loc=2)
    sns.despine(left=True)
    ax.set_xlabel('Image ID')
    ax.set_ylabel('Class Type')
    ax.set_xticklabels(perc_area.columns, rotation=-60)
def jaccard_index(mask_1, mask_2):
    """Intersection-over-union of two binary 2-D masks.

    An empty union (both masks all zero) is defined as perfect agreement
    and returns 1.
    :param mask_1: 2-D array with values in [0, 1].
    :param mask_2: 2-D array with values in [0, 1].
    :return: float Jaccard index in [0, 1].
    """
    assert len(mask_1.shape) == len(mask_2.shape) == 2
    assert 0 <= np.amax(mask_1) <= 1
    assert 0 <= np.amax(mask_2) <= 1
    a = mask_1.astype(np.float32)
    b = mask_2.astype(np.float32)
    intersection = np.sum(a * b)
    union = np.sum(a + b) - intersection
    return 1. if union == 0 else intersection / union
def mask_to_polygons(mask, img_id, epsilon=1, min_area=1., test=True):
    """
    Convert a binary prediction mask back into a shapely MultiPolygon in
    the relative (Xmax / Ymin scaled) coordinate system used for
    submission.
    :param mask: 2-D mask with values 0/1.
    :param img_id: numeric image index, resolved through test_IDs_dict or
        train_IDs_dict depending on *test*.
    :param epsilon: approximation tolerance (pixels) for cv2.approxPolyDP.
    :param min_area: contours smaller than this pixel area are dropped.
    :param test: whether img_id indexes the test or the train image list.
    :return: shapely MultiPolygon in relative coordinates.
    """
    # find contours, cv2 switches the x-y coordiante of mask to y-x in contours
    # This matches the wkt data in train_wkt_v4, which is desirable for submission
    # NOTE(review): the 3-value return is the OpenCV 3.x API; OpenCV 4
    # returns only (contours, hierarchy) — confirm the pinned cv2 version.
    image, contours, hierarchy = cv2.findContours(
        ((mask == 1) * 255).astype(np.uint8),
        cv2.RETR_CCOMP, cv2.CHAIN_APPROX_TC89_KCOS)
    # create approximate contours
    approx_contours = [cv2.approxPolyDP(cnt, epsilon, True)
                       for cnt in contours]
    if not contours:
        return MultiPolygon()
    # RETR_CCOMP yields a two-level hierarchy: index children (holes)
    # under their parent (outer boundary).
    cnt_children = defaultdict(list)
    child_contours = set()
    assert hierarchy.shape[0] == 1
    for idx, (_, _, _, parent_idx) in enumerate(hierarchy[0]):
        if parent_idx != -1:
            child_contours.add(idx)
            cnt_children[parent_idx].append(approx_contours[idx])
    # create actual polygon filtering by area (remove artifacts)
    all_polygons = []
    for idx, cnt in enumerate(approx_contours):
        if idx not in child_contours and cv2.contourArea(cnt) >= min_area:
            assert cnt.shape[1] == 1
            poly = Polygon(shell = cnt[:, 0, :],
                           holes = [c[:, 0, :] for c in cnt_children.get(idx, [])
                                   if cv2.contourArea(c) >= min_area])
            all_polygons.append(poly)
    # approximating polygons might have created invalid ones, fix them
    all_polygons = MultiPolygon(all_polygons)
    if not all_polygons.is_valid:
        all_polygons = all_polygons.buffer(0)
        # Sometimes buffer() converts a simple Multipolygon to just a Polygon,
        # need to keep it a Multi throughout
        if all_polygons.type == 'Polygon':
            all_polygons = MultiPolygon([all_polygons])
    id = test_IDs_dict[img_id] if test else train_IDs_dict[img_id]
    x_max = grid_sizes[grid_sizes.ImageId == id].Xmax.values[0]
    y_min = grid_sizes[grid_sizes.ImageId == id].Ymin.values[0]
    # Map pixel coordinates back into the relative frame; y_min is
    # negative, so the scale also flips the y axis.
    x_scaler, y_scaler = x_max / mask.shape[1], y_min / mask.shape[0]
    scaled_pred_polygons = scale(all_polygons, xfact=x_scaler,
                                 yfact=y_scaler, origin=(0., 0., 0.))
    return scaled_pred_polygons
def polygon_jaccard(final_polygons, train_polygons):
    """Jaccard index (intersection over union) of two shapely
    (Multi)Polygons.

    :param final_polygons: predicted multipolygon.
    :param train_polygons: ground-truth multipolygon.
    :return: float IoU of the two geometries.
    """
    overlap = final_polygons.intersection(train_polygons).area
    combined = final_polygons.union(train_polygons).area
    return overlap / combined
class ImageData:
    """
    Lazy container for one satellite image of the DSTL dataset.

    Bundles the aligned 3-band + 16-band imagery, the rasterized class
    labels, synthesized spectral features and cropping/plotting helpers.
    All heavy data is loaded on demand via load_image / create_label /
    create_train_feature.
    """
    def __init__(self, image_id, phase='train'):
        # image_id is a numeric index; translate it to the dataset's
        # ImageId string via the train/test lookup tables.
        self.image_id = train_IDs_dict[image_id] \
            if phase == 'train' else test_IDs_dict[image_id]
        # Per-class polygon statistics exist only for training images.
        self.stat = image_stat(self.image_id) if phase == 'train' else None
        self.three_band_image = None
        self.sixteen_band_image = None
        self.image = None
        self.image_size = None
        self._xymax = None
        self.label = None
        self.crop_image = None
        self.train_feature = None
        self.pred_mask = None

    def load_pre_mask(self):
        # Reset/clear the prediction-mask placeholder.
        self.pred_mask = None

    def load_image(self):
        """
        Load three band and sixteen band images, registered and at the same
        resolution.
        Assigns image, three_band_image, sixteen_band_image, image_size
        and the relative extent _xymax.
        :return:
        """
        im = self.image_stack()
        self.three_band_image = im[..., 0:3]
        self.sixteen_band_image = im[..., 3:]
        self.image = im
        self.image_size = np.shape(im)[0: 2]
        # Relative extent of this image; Ymin is negative by convention.
        xmax = grid_sizes[grid_sizes.ImageId == self.image_id].Xmax.values[0]
        ymax = grid_sizes[grid_sizes.ImageId == self.image_id].Ymin.values[0]
        self._xymax = [xmax, ymax]

    def get_image_path(self):
        """
        Returns the paths for all images, keyed by band group
        ('3' = RGB, 'A'/'M' = multispectral, 'P' = panchromatic).
        :return: dict of file paths.
        """
        return {
            '3': '{}/data/three_band/{}.tif'.format(data_dir, self.image_id),
            'A': '{}/data/sixteen_band/{}_A.tif'.format(data_dir, self.image_id),
            'M': '{}/data/sixteen_band/{}_M.tif'.format(data_dir, self.image_id),
            'P': '{}/data/sixteen_band/{}_P.tif'.format(data_dir, self.image_id)
        }

    def read_image(self):
        """
        Read all original images and resample the A/M/P bands to the
        3-band resolution.
        :return: dict with keys '3', 'A', 'M', 'P'.
        """
        images = {}
        path = self.get_image_path()
        for key in path:
            im = tifffile.imread(path[key])
            if key != 'P':
                # tifffile returns (bands, y, x); reorder to (y, x, bands).
                images[key] = np.transpose(im, (1, 2, 0))
            elif key == 'P':
                # The panchromatic band is already single-channel 2-D.
                images[key] = im
        im3 = images['3']
        ima = images['A']
        imm = images['M']
        imp = images['P']
        nx, ny, _ = im3.shape
        images['A'] = resize(ima, [nx, ny])
        images['M'] = resize(imm, [nx, ny])
        images['P'] = resize(imp, [nx, ny])
        return images

    def image_stack(self):
        """
        Resample all images to highest resolution, align the A/M sensors
        to the 3-band image and stack everything channel-wise.
        :return: single (nx, ny, 20) array: RGB + A + M + P.
        """
        images = self.read_image()
        im3 = images['3']
        ima = images['A']
        imm = images['M']
        imp = images['P']
        imp = np.expand_dims(imp, 2)
        [nx, ny, _] = im3.shape
        # Pre-computed alignment matrices correct the acquisition offset
        # between the 3-band image and the A/M sensors.
        # NOTE(review): despite the .npz suffix these load as plain arrays
        # usable by cv2.warpAffine — confirm they were written via np.save.
        warp_matrix_a = np.load(
            (data_dir +
             '/utils/image_alignment/{}_warp_matrix_a.npz').format(self.image_id)
        )
        warp_matrix_m = np.load(
            (data_dir +
             '/utils/image_alignment/{}_warp_matrix_m.npz').format(self.image_id)
        )
        ima = affine_transform(ima, warp_matrix_a, [nx, ny])
        imm = affine_transform(imm, warp_matrix_m, [nx, ny])
        im = np.concatenate((im3, ima, imm, imp), axis=-1)
        return im

    def create_label(self):
        """
        Create the pixel-wise class labels: a uint8 tensor of shape
        (height, width, n_classes) with one binary channel per class.
        :return:
        """
        if self.image is None:
            self.load_image()
        labels = np.zeros(np.append(self.image_size, len(CLASSES)), np.uint8)
        for cl in CLASSES:
            polygon_list = get_polygon_list(self.image_id, cl)
            perim_list, inter_list = generate_contours(
                polygon_list, self.image_size, self._xymax)
            mask = generate_mask_from_contours(
                self.image_size, perim_list, inter_list, class_id = 1)
            labels[..., cl - 1] = mask
        self.label = labels

    def create_train_feature(self):
        """
        Create synthesized features: M bands + RGB plus the EVI, NDWI,
        SAVI and CCCI vegetation/water indices (22 channels total).
        :return:
        """
        if self.three_band_image is None:
            self.load_image()
        m = self.sixteen_band_image[..., 8:].astype(np.float32)
        rgb = self.three_band_image.astype(np.float32)
        image_r = rgb[..., 0]
        image_g = rgb[..., 1]
        image_b = rgb[..., 2]
        nir = m[..., 7]
        re = m[..., 5]
        L, C1, C2 = 1.0, 6.0, 7.5
        # Enhanced vegetation index, clipped to its 1-99 percentile range.
        evi = np.nan_to_num(
            (nir - image_r) / (nir + C1 * image_r - C2 * image_b + L))
        evi = evi.clip(max=np.percentile(evi, 99), min=np.percentile(evi, 1))
        evi = np.expand_dims(evi, 2)
        # Normalized-difference water index.
        ndwi = (image_g - nir) / (image_g + nir)
        ndwi = np.expand_dims(ndwi, 2)
        savi = (nir - image_r) / (image_r + nir)
        savi = np.expand_dims(savi, 2)
        # binary = (ccci > 0.11).astype(np.float32) marks water fairly well
        ccci = np.nan_to_num(
            (nir - re) / (nir + re) * (nir - image_r) / (nir + image_r))
        ccci = ccci.clip(
            max=np.percentile(ccci, 99.9),
            min=np.percentile(ccci, 0.1))
        ccci = np.expand_dims(ccci, 2)
        feature = np.concatenate([m, rgb, evi, ndwi, savi, ccci], 2)
        # Division by zero in the indices can leave +-inf; zero them out.
        feature[feature == np.inf] = 0
        feature[feature == -np.inf] = 0
        self.train_feature = feature

    def visualize_image(self, plot_all=True):
        """
        Visualize the class-label overlay and, when plot_all is True, the
        raw 3-band / A / M channel combinations as well.
        :param plot_all: also show the individual band panels.
        :return:
        """
        if self.label is None:
            self.create_label()
        if not plot_all:
            fig, axarr = plt.subplots(figsize=[10, 10])
            ax = axarr
        else:
            fig, axarr = plt.subplots(figsize=[20, 20], ncols=3, nrows=3)
            ax = axarr[0][0]
        polygon_list = {}
        for cl in CLASSES:
            polygon_list[cl] = get_polygon_list(self.image_id, cl)
            print('{}: {} \t\tcount = {}'.format(
                cl, CLASSES[cl], len(polygon_list[cl])))
        legend = plot_polygon(polygon_list=polygon_list, ax=ax)
        ax.set_xlim(0, self._xymax[0])
        ax.set_ylim(self._xymax[1], 0)
        ax.set_xlabel(self.image_size[0])
        ax.set_ylabel(self.image_size[1])
        if plot_all:
            three_band_rescale = scale_percentile(self.three_band_image)
            sixteen_band_rescale = scale_percentile(self.sixteen_band_image)
            plot_image(three_band_rescale, axarr[0][1], self.image_id, '3')
            plot_overlay(three_band_rescale, axarr[0][2], self.image_id, '3',
                         polygon_list,
                         scaler=self.image_size / np.array([self._xymax[1],
                                                            self._xymax[0]]))
            axarr[0][2].set_ylim(self.image_size[0], 0)
            axarr[0][2].set_xlim(0, self.image_size[1])
            plot_image(sixteen_band_rescale, axarr[1][0], self.image_id, 'A',
                       selected_channel=[0, 3, 6])
            plot_image(sixteen_band_rescale, axarr[1][1], self.image_id, 'A',
                       selected_channel=[1, 4, 7])
            plot_image(sixteen_band_rescale, axarr[1][2], self.image_id, 'A',
                       selected_channel=[2, 5, 0])
            plot_image(sixteen_band_rescale, axarr[2][0], self.image_id, 'M',
                       selected_channel=[8, 11, 14])
            plot_image(sixteen_band_rescale, axarr[2][1], self.image_id, 'M',
                       selected_channel=[9, 12, 15])
            plot_image(sixteen_band_rescale, axarr[2][2], self.image_id, 'M',
                       selected_channel=[10, 13, 8])
        ax.legend(handles = legend,
                  bbox_to_anchor=(0.9, 0.95),
                  bbox_transform=plt.gcf().transFigure,
                  ncol=5,
                  fontsize='large',
                  title='Objects-' + self.image_id,
                  framealpha=0.3)

    def visualize_label(self, x_range=None, y_range=None, alpha=1.0):
        """
        Show the contrast-stretched RGB image next to the same image with
        the class polygons overlaid.
        :param x_range: display hint, defaults to the full image width.
        :param y_range: display hint, defaults to the full image height.
        :param alpha: transparency of the polygon overlay.
        :return:
        """
        if self.label is None:
            self.create_label()
        if not x_range:
            x_range = [0, self.image_size[0]]
        if not y_range:
            y_range = [0, self.image_size[1]]
        fig, axarr = plt.subplots(figsize=[13, 7], ncols=2, nrows=1)
        polygon_list = {}
        for cl in CLASSES:
            polygon_list[cl] = get_polygon_list(self.image_id, cl)
            print('{}: {} \t\tcount = {}'.format(
                cl, CLASSES[cl], len(polygon_list[cl])))
        three_band_rescale = scale_percentile(self.three_band_image)
        ax = axarr[0]
        ax.imshow(three_band_rescale, vmax=1., vmin=0.)
        ax.set_xticks([])
        ax.set_yticks([])
        ax = axarr[1]
        legend = plot_overlay(
            three_band_rescale, ax, self.image_id, 'P', polygon_list,
            scaler=self.image_size / np.array([self._xymax[1], self._xymax[0]]),
            alpha=alpha, rgb=True)
        ax.legend(handles=legend,
                  bbox_to_anchor=(0.83, 0.92),
                  bbox_transform=plt.gcf().transFigure,
                  ncol=5,
                  framealpha=0.3)
        plt.suptitle('Objects-' + self.image_id)

    def apply_crop(self, patch_size, ref_point=(0, 0), method='random'):
        """
        Store a patch_size x patch_size crop of the stacked image in
        self.crop_image.
        :param patch_size: side length of the square patch, in pixels.
        :param ref_point: top-left corner; used as input only when
            method == 'grid'.
        :param method: 'random' draws a random corner, 'grid' uses
            ref_point directly.
        :raises NotImplementedError: for any other method.
        """
        if self.image is None:
            self.load_image()
        crop_area = np.zeros([2, 2])
        width = self.image_size[0]
        height = self.image_size[1]
        assert width >= patch_size > 0 and patch_size <= height
        # Copy to a mutable list first: the default ref_point is a tuple
        # and the in-place item assignments below would raise TypeError
        # on it (previous behavior for method='random' with defaults).
        ref_point = list(ref_point)
        if method == 'random':
            ref_point[0] = random.randint(0, width - patch_size)
            ref_point[1] = random.randint(0, height - patch_size)
            crop_area[0][0] = ref_point[0]
            crop_area[1][0] = ref_point[1]
            crop_area[0][1] = ref_point[0] + patch_size
            crop_area[1][1] = ref_point[1] + patch_size
        elif method == 'grid':
            assert width > ref_point[0] + patch_size
            assert height > ref_point[1] + patch_size
            crop_area[0][0] = ref_point[0]
            crop_area[1][0] = ref_point[1]
            crop_area[0][1] = ref_point[0] + patch_size
            crop_area[1][1] = ref_point[1] + patch_size
        else:
            raise NotImplementedError(
                '"method" should either be "random" or "grid"')
        self.crop_image = crop(self.image, crop_area)
if __name__ == '__main__':
    # Demo: load the first training image, rasterize its labels and show
    # the RGB image side by side with the class-polygon overlay.
    img_data = ImageData(0)
    # load data
    img_data.load_image()
    img_data.create_label()
    #img_data.create_train_feature()
    # visualize
    img_data.visualize_label(alpha=0.7)
    plt.show()
| [
"pandas.read_csv",
"shapely.wkt.loads",
"descartes.patch.PolygonPatch",
"seaborn.set_style",
"numpy.array",
"cv2.approxPolyDP",
"numpy.percentile",
"shapely.affinity.scale",
"numpy.arange",
"numpy.mean",
"numpy.reshape",
"seaborn.despine",
"cv2.contourArea",
"numpy.dot",
"numpy.concatena... | [((2667, 2780), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/data/train_wkt_v4.csv')"], {'names': "['ImageId', 'ClassId', 'MultipolygonWKT']", 'skiprows': '(1)'}), "(data_dir + '/data/train_wkt_v4.csv', names=['ImageId',\n 'ClassId', 'MultipolygonWKT'], skiprows=1)\n", (2678, 2780), True, 'import pandas as pd\n'), ((2957, 3054), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/data/grid_sizes.csv')"], {'names': "['ImageId', 'Xmax', 'Ymin']", 'skiprows': '(1)'}), "(data_dir + '/data/grid_sizes.csv', names=['ImageId', 'Xmax',\n 'Ymin'], skiprows=1)\n", (2968, 3054), True, 'import pandas as pd\n'), ((3129, 3247), 'pandas.read_csv', 'pd.read_csv', (["(data_dir + '/data/sample_submission.csv')"], {'names': "['ImageId', 'ClassId', 'MultipolygonWKT']", 'skiprows': '(1)'}), "(data_dir + '/data/sample_submission.csv', names=['ImageId',\n 'ClassId', 'MultipolygonWKT'], skiprows=1)\n", (3140, 3247), True, 'import pandas as pd\n'), ((4255, 4330), 'cv2.resize', 'cv2.resize', (['im', '(shape_out[1], shape_out[0])'], {'interpolation': 'cv2.INTER_CUBIC'}), '(im, (shape_out[1], shape_out[0]), interpolation=cv2.INTER_CUBIC)\n', (4265, 4330), False, 'import cv2\n'), ((4609, 4756), 'cv2.warpAffine', 'cv2.warpAffine', (['img', 'warp_matrix', '(out_shape[1], out_shape[0])'], {'flags': '(cv2.INTER_LINEAR + cv2.WARP_INVERSE_MAP)', 'borderMode': 'cv2.BORDER_REPLICATE'}), '(img, warp_matrix, (out_shape[1], out_shape[0]), flags=cv2.\n INTER_LINEAR + cv2.WARP_INVERSE_MAP, borderMode=cv2.BORDER_REPLICATE)\n', (4623, 4756), False, 'import cv2\n'), ((5499, 5527), 'shapely.wkt.loads', 'wkt.loads', (['polygon.values[0]'], {}), '(polygon.values[0])\n', (5508, 5527), True, 'import shapely.wkt as wkt\n'), ((7036, 7064), 'numpy.zeros', 'np.zeros', (['img_size', 'np.uint8'], {}), '(img_size, np.uint8)\n', (7044, 7064), True, 'import numpy as np\n'), ((7276, 7316), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'perim_list', 'class_id'], {}), '(mask, perim_list, class_id)\n', 
(7288, 7316), False, 'import cv2\n'), ((7321, 7354), 'cv2.fillPoly', 'cv2.fillPoly', (['mask', 'inter_list', '(0)'], {}), '(mask, inter_list, 0)\n', (7333, 7354), False, 'import cv2\n'), ((10809, 10838), 'numpy.percentile', 'np.percentile', (['img', '(1)'], {'axis': '(0)'}), '(img, 1, axis=0)\n', (10822, 10838), True, 'import numpy as np\n'), ((10958, 10985), 'numpy.reshape', 'np.reshape', (['img', 'orig_shape'], {}), '(img, orig_shape)\n', (10968, 10985), True, 'import numpy as np\n'), ((11124, 11167), 'numpy.dot', 'np.dot', (['rgb[..., :3]', '[0.299, 0.587, 0.144]'], {}), '(rgb[..., :3], [0.299, 0.587, 0.144])\n', (11130, 11167), True, 'import numpy as np\n'), ((12640, 12763), 'pandas.DataFrame', 'pd.DataFrame', (["{'Class': CLASSES, 'Counts': counts, 'TotalArea': total_area, 'MeanArea':\n mean_area, 'STDArea': std_area}"], {}), "({'Class': CLASSES, 'Counts': counts, 'TotalArea': total_area,\n 'MeanArea': mean_area, 'STDArea': std_area})\n", (12652, 12763), True, 'import pandas as pd\n'), ((13407, 13429), 'sys.stdout.write', 'sys.stdout.write', (['"""\n"""'], {}), "('\\n')\n", (13423, 13429), False, 'import sys\n'), ((13441, 13457), 'pandas.concat', 'pd.concat', (['stats'], {}), '(stats)\n', (13450, 13457), True, 'import pandas as pd\n'), ((14106, 14135), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(10, 4)'}), '(figsize=(10, 4))\n', (14118, 14135), True, 'import matplotlib.pyplot as plt\n'), ((14672, 14694), 'numpy.cumsum', 'np.cumsum', (['pvt'], {'axis': '(0)'}), '(pvt, axis=0)\n', (14681, 14694), True, 'import numpy as np\n'), ((14718, 14740), 'seaborn.set_style', 'sns.set_style', (['"""white"""'], {}), "('white')\n", (14731, 14740), True, 'import seaborn as sns\n'), ((14745, 14789), 'seaborn.set_context', 'sns.set_context', (["{'figure.figsize': (12, 8)}"], {}), "({'figure.figsize': (12, 8)})\n", (14760, 14789), True, 'import seaborn as sns\n'), ((14855, 14871), 'numpy.arange', 'np.arange', (['(1)', '(11)'], {}), '(1, 11)\n', (14864, 
14871), True, 'import numpy as np\n'), ((15131, 15153), 'seaborn.despine', 'sns.despine', ([], {'left': '(True)'}), '(left=True)\n', (15142, 15153), True, 'import seaborn as sns\n'), ((16523, 16540), 'collections.defaultdict', 'defaultdict', (['list'], {}), '(list)\n', (16534, 16540), False, 'from collections import defaultdict\n'), ((17378, 17404), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (['all_polygons'], {}), '(all_polygons)\n', (17390, 17404), False, 'from shapely.geometry import MultiPolygon, Polygon\n'), ((18003, 18078), 'shapely.affinity.scale', 'scale', (['all_polygons'], {'xfact': 'x_scaler', 'yfact': 'y_scaler', 'origin': '(0.0, 0.0, 0.0)'}), '(all_polygons, xfact=x_scaler, yfact=y_scaler, origin=(0.0, 0.0, 0.0))\n', (18008, 18078), False, 'from shapely.affinity import scale\n'), ((29036, 29046), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (29044, 29046), True, 'import matplotlib.pyplot as plt\n'), ((1945, 1971), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1961, 1971), False, 'import os\n'), ((3542, 3574), 'numpy.in1d', 'np.in1d', (['_df.ImageId', 'duplicates'], {}), '(_df.ImageId, duplicates)\n', (3549, 3574), True, 'import numpy as np\n'), ((3605, 3638), 'numpy.in1d', 'np.in1d', (['_df1.ImageId', 'duplicates'], {}), '(_df1.ImageId, duplicates)\n', (3612, 3638), True, 'import numpy as np\n'), ((10850, 10880), 'numpy.percentile', 'np.percentile', (['img', '(99)'], {'axis': '(0)'}), '(img, 99, axis=0)\n', (10863, 10880), True, 'import numpy as np\n'), ((13384, 13402), 'sys.stdout.flush', 'sys.stdout.flush', ([], {}), '()\n', (13400, 13402), False, 'import sys\n'), ((14332, 14355), 'numpy.arange', 'np.arange', (['(0.5)', '(25.4)', '(1)'], {}), '(0.5, 25.4, 1)\n', (14341, 14355), True, 'import numpy as np\n'), ((14375, 14398), 'numpy.arange', 'np.arange', (['(0.5)', '(10.4)', '(1)'], {}), '(0.5, 10.4, 1)\n', (14384, 14398), True, 'import numpy as np\n'), ((14423, 14439), 'numpy.arange', 'np.arange', 
(['(1)', '(26)'], {}), '(1, 26)\n', (14432, 14439), True, 'import numpy as np\n'), ((14967, 15075), 'seaborn.barplot', 'sns.barplot', ([], {'x': 'perc_area.columns', 'y': 'perc_area.loc[class_name]', 'color': 'COLORS[class_id]', 'label': 'class_name'}), '(x=perc_area.columns, y=perc_area.loc[class_name], color=COLORS[\n class_id], label=class_name)\n', (14978, 15075), True, 'import seaborn as sns\n'), ((15497, 15512), 'numpy.amax', 'np.amax', (['mask_1'], {}), '(mask_1)\n', (15504, 15512), True, 'import numpy as np\n'), ((15533, 15548), 'numpy.amax', 'np.amax', (['mask_2'], {}), '(mask_2)\n', (15540, 15548), True, 'import numpy as np\n'), ((16370, 16406), 'cv2.approxPolyDP', 'cv2.approxPolyDP', (['cnt', 'epsilon', '(True)'], {}), '(cnt, epsilon, True)\n', (16386, 16406), False, 'import cv2\n'), ((16488, 16502), 'shapely.geometry.MultiPolygon', 'MultiPolygon', ([], {}), '()\n', (16500, 16502), False, 'from shapely.geometry import MultiPolygon, Polygon\n'), ((21101, 21123), 'numpy.expand_dims', 'np.expand_dims', (['imp', '(2)'], {}), '(imp, 2)\n', (21115, 21123), True, 'import numpy as np\n'), ((21593, 21638), 'numpy.concatenate', 'np.concatenate', (['(im3, ima, imm, imp)'], {'axis': '(-1)'}), '((im3, ima, imm, imp), axis=-1)\n', (21607, 21638), True, 'import numpy as np\n'), ((22787, 22859), 'numpy.nan_to_num', 'np.nan_to_num', (['((nir - image_r) / (nir + C1 * image_r - C2 * image_b + L))'], {}), '((nir - image_r) / (nir + C1 * image_r - C2 * image_b + L))\n', (22800, 22859), True, 'import numpy as np\n'), ((22965, 22987), 'numpy.expand_dims', 'np.expand_dims', (['evi', '(2)'], {}), '(evi, 2)\n', (22979, 22987), True, 'import numpy as np\n'), ((23053, 23076), 'numpy.expand_dims', 'np.expand_dims', (['ndwi', '(2)'], {}), '(ndwi, 2)\n', (23067, 23076), True, 'import numpy as np\n'), ((23142, 23165), 'numpy.expand_dims', 'np.expand_dims', (['savi', '(2)'], {}), '(savi, 2)\n', (23156, 23165), True, 'import numpy as np\n'), ((23258, 23332), 'numpy.nan_to_num', 
'np.nan_to_num', (['((nir - re) / (nir + re) * (nir - image_r) / (nir + image_r))'], {}), '((nir - re) / (nir + re) * (nir - image_r) / (nir + image_r))\n', (23271, 23332), True, 'import numpy as np\n'), ((23472, 23495), 'numpy.expand_dims', 'np.expand_dims', (['ccci', '(2)'], {}), '(ccci, 2)\n', (23486, 23495), True, 'import numpy as np\n'), ((23515, 23565), 'numpy.concatenate', 'np.concatenate', (['[m, rgb, evi, ndwi, savi, ccci]', '(2)'], {}), '([m, rgb, evi, ndwi, savi, ccci], 2)\n', (23529, 23565), True, 'import numpy as np\n'), ((26669, 26716), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[13, 7]', 'ncols': '(2)', 'nrows': '(1)'}), '(figsize=[13, 7], ncols=2, nrows=1)\n', (26681, 26716), True, 'import matplotlib.pyplot as plt\n'), ((27591, 27631), 'matplotlib.pyplot.suptitle', 'plt.suptitle', (["('Objects-' + self.image_id)"], {}), "('Objects-' + self.image_id)\n", (27603, 27631), True, 'import matplotlib.pyplot as plt\n'), ((27789, 27805), 'numpy.zeros', 'np.zeros', (['[2, 2]'], {}), '([2, 2])\n', (27797, 27805), True, 'import numpy as np\n'), ((5922, 5938), 'numpy.round', 'np.round', (['coords'], {}), '(coords)\n', (5930, 5938), True, 'import numpy as np\n'), ((8593, 8690), 'descartes.patch.PolygonPatch', 'PolygonPatch', ([], {'polygon': 'polygon_rescale', 'color': 'COLORS[cl]', 'lw': '(0)', 'alpha': 'alpha', 'zorder': 'ZORDER[cl]'}), '(polygon=polygon_rescale, color=COLORS[cl], lw=0, alpha=alpha,\n zorder=ZORDER[cl])\n', (8605, 8690), False, 'from descartes.patch import PolygonPatch\n'), ((17679, 17707), 'shapely.geometry.MultiPolygon', 'MultiPolygon', (['[all_polygons]'], {}), '([all_polygons])\n', (17691, 17707), False, 'from shapely.geometry import MultiPolygon, Polygon\n'), ((19464, 19476), 'numpy.shape', 'np.shape', (['im'], {}), '(im)\n', (19472, 19476), True, 'import numpy as np\n'), ((20339, 20365), 'tifffile.imread', 'tifffile.imread', (['path[key]'], {}), '(path[key])\n', (20354, 20365), False, 'import tifffile\n'), ((23956, 
23986), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[10, 10]'}), '(figsize=[10, 10])\n', (23968, 23986), True, 'import matplotlib.pyplot as plt\n'), ((24049, 24097), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '[20, 20]', 'ncols': '(3)', 'nrows': '(3)'}), '(figsize=[20, 20], ncols=3, nrows=3)\n', (24061, 24097), True, 'import matplotlib.pyplot as plt\n'), ((28001, 28038), 'random.randint', 'random.randint', (['(0)', '(width - patch_size)'], {}), '(0, width - patch_size)\n', (28015, 28038), False, 'import random\n'), ((28066, 28104), 'random.randint', 'random.randint', (['(0)', '(height - patch_size)'], {}), '(0, height - patch_size)\n', (28080, 28104), False, 'import random\n'), ((8076, 8161), 'shapely.affinity.scale', 'affinity.scale', (['polygon'], {'xfact': 'scaler[1]', 'yfact': 'scaler[0]', 'origin': '[0.0, 0.0, 0.0]'}), '(polygon, xfact=scaler[1], yfact=scaler[0], origin=[0.0, 0.0,\n 0.0])\n', (8090, 8161), False, 'from shapely import affinity\n'), ((10551, 10614), 'numpy.reshape', 'np.reshape', (['img', '[orig_shape[0] * orig_shape[1], orig_shape[2]]'], {}), '(img, [orig_shape[0] * orig_shape[1], orig_shape[2]])\n', (10561, 10614), True, 'import numpy as np\n'), ((16976, 16996), 'cv2.contourArea', 'cv2.contourArea', (['cnt'], {}), '(cnt)\n', (16991, 16996), False, 'import cv2\n'), ((20423, 20450), 'numpy.transpose', 'np.transpose', (['im', '(1, 2, 0)'], {}), '(im, (1, 2, 0))\n', (20435, 20450), True, 'import numpy as np\n'), ((22900, 22922), 'numpy.percentile', 'np.percentile', (['evi', '(99)'], {}), '(evi, 99)\n', (22913, 22922), True, 'import numpy as np\n'), ((22928, 22949), 'numpy.percentile', 'np.percentile', (['evi', '(1)'], {}), '(evi, 1)\n', (22941, 22949), True, 'import numpy as np\n'), ((23388, 23413), 'numpy.percentile', 'np.percentile', (['ccci', '(99.9)'], {}), '(ccci, 99.9)\n', (23401, 23413), True, 'import numpy as np\n'), ((23431, 23455), 'numpy.percentile', 'np.percentile', (['ccci', '(0.1)'], {}), 
'(ccci, 0.1)\n', (23444, 23455), True, 'import numpy as np\n'), ((10730, 10778), 'numpy.reshape', 'np.reshape', (['img', '[orig_shape[0] * orig_shape[1]]'], {}), '(img, [orig_shape[0] * orig_shape[1]])\n', (10740, 10778), True, 'import numpy as np\n'), ((12296, 12340), 'numpy.sum', 'np.sum', (['[poly.area for poly in polygon_list]'], {}), '([poly.area for poly in polygon_list])\n', (12302, 12340), True, 'import numpy as np\n'), ((12417, 12462), 'numpy.mean', 'np.mean', (['[poly.area for poly in polygon_list]'], {}), '([poly.area for poly in polygon_list])\n', (12424, 12462), True, 'import numpy as np\n'), ((12537, 12581), 'numpy.std', 'np.std', (['[poly.area for poly in polygon_list]'], {}), '([poly.area for poly in polygon_list])\n', (12543, 12581), True, 'import numpy as np\n'), ((26110, 26119), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (26117, 26119), True, 'import matplotlib.pyplot as plt\n'), ((27306, 27348), 'numpy.array', 'np.array', (['[self._xymax[1], self._xymax[0]]'], {}), '([self._xymax[1], self._xymax[0]])\n', (27314, 27348), True, 'import numpy as np\n'), ((27500, 27509), 'matplotlib.pyplot.gcf', 'plt.gcf', ([], {}), '()\n', (27507, 27509), True, 'import matplotlib.pyplot as plt\n'), ((24998, 25040), 'numpy.array', 'np.array', (['[self._xymax[1], self._xymax[0]]'], {}), '([self._xymax[1], self._xymax[0]])\n', (25006, 25040), True, 'import numpy as np\n'), ((17217, 17235), 'cv2.contourArea', 'cv2.contourArea', (['c'], {}), '(c)\n', (17232, 17235), False, 'import cv2\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
tif_to_nii
command line executable to convert a directory of tif images
(from one image) to a nifti image stacked along a user-specified axis
call as: python tif_to_nii.py /path/to/tif/ /path/to/nifti
(append optional arguments to the call as desired)
Author: <NAME> (<EMAIL>)
"""
import argparse
from glob import glob
import os
from pathlib import Path
import sys
import tifffile
from PIL import Image
import nibabel as nib
import numpy as np
def arg_parser():
parser = argparse.ArgumentParser(description='merge 2d tif images into a 3d image')
parser.add_argument('img_dir', type=str,
help='path to tiff image directory')
parser.add_argument('out_dir', type=str,
help='path to output the corresponding tif image slices')
parser.add_argument('-a', '--axis', type=int, default=2,
help='axis on which to stack the 2d images')
return parser
def split_filename(filepath):
path = os.path.dirname(filepath)
filename = os.path.basename(filepath)
base, ext = os.path.splitext(filename)
if ext == '.gz':
base, ext2 = os.path.splitext(base)
ext = ext2 + ext
return path, base, ext
def main():
try:
args = arg_parser().parse_args()
img_dir = Path(args.img_dir)
fns = sorted([str(fn) for fn in img_dir.glob('*.tif*')])
if not fns:
raise ValueError(f'img_dir ({args.img_dir}) does not contain any .tif or .tiff images.')
imgs = []
for fn in fns:
_, base, ext = split_filename(fn)
img = np.asarray(Image.open(fn)).astype(np.float32).squeeze()
if img.ndim != 2:
raise Exception(f'Only 2D data supported. File {base}{ext} has dimension {img.ndim}.')
imgs.append(img)
img = np.stack(imgs, axis=args.axis)
nib.Nifti1Image(img,None).to_filename(os.path.join(args.out_dir, f'{base}.nii.gz'))
return 0
except Exception as e:
print(e)
return 1
def main1(input_path, output_path):
img = tifffile.imread(input_path)
img = np.asarray(img).astype(np.float32)
nib.Nifti1Image(img, None).to_filename(output_path)
if __name__ == "__main__":
input_path = r'H:\血管后处理文献阅读\新增参考文献\graph\tutorials\3d_classification\datasets\dataset40\val'
for dir in os.listdir(input_path):
dir_path = os.path.join(input_path, dir)
for file in os.listdir(dir_path):
if file[-8:] == '3dim.tif':
file_path = os.path.join(dir_path, file)
output_path = os.path.join(dir_path, file[:-4] + '.nii.gz')
print(file_path, '\n', output_path)
main1(file_path, output_path)
# img=nib.load(output_path)
# img_arr=img.get_fdata()
# print(img_arr.shape)
#
# nib.Nifti1Image(img_arr[0], None).to_filename(r'C:\Users\Administrator\Desktop\0.nii')
# nib.Nifti1Image(img_arr[1], None).to_filename(r'C:\Users\Administrator\Desktop\1.nii')
# nib.Nifti1Image(img_arr[2], None).to_filename(r'C:\Users\Administrator\Desktop\2.nii')
#
#
# # io.imshow(img_arr) | [
"os.listdir",
"tifffile.imread",
"PIL.Image.open",
"argparse.ArgumentParser",
"pathlib.Path",
"os.path.splitext",
"os.path.join",
"numpy.asarray",
"os.path.dirname",
"numpy.stack",
"os.path.basename",
"nibabel.Nifti1Image"
] | [((528, 602), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""merge 2d tif images into a 3d image"""'}), "(description='merge 2d tif images into a 3d image')\n", (551, 602), False, 'import argparse\n'), ((1027, 1052), 'os.path.dirname', 'os.path.dirname', (['filepath'], {}), '(filepath)\n', (1042, 1052), False, 'import os\n'), ((1068, 1094), 'os.path.basename', 'os.path.basename', (['filepath'], {}), '(filepath)\n', (1084, 1094), False, 'import os\n'), ((1111, 1137), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1127, 1137), False, 'import os\n'), ((2127, 2154), 'tifffile.imread', 'tifffile.imread', (['input_path'], {}), '(input_path)\n', (2142, 2154), False, 'import tifffile\n'), ((2397, 2419), 'os.listdir', 'os.listdir', (['input_path'], {}), '(input_path)\n', (2407, 2419), False, 'import os\n'), ((1180, 1202), 'os.path.splitext', 'os.path.splitext', (['base'], {}), '(base)\n', (1196, 1202), False, 'import os\n'), ((1337, 1355), 'pathlib.Path', 'Path', (['args.img_dir'], {}), '(args.img_dir)\n', (1341, 1355), False, 'from pathlib import Path\n'), ((1879, 1909), 'numpy.stack', 'np.stack', (['imgs'], {'axis': 'args.axis'}), '(imgs, axis=args.axis)\n', (1887, 1909), True, 'import numpy as np\n'), ((2440, 2469), 'os.path.join', 'os.path.join', (['input_path', 'dir'], {}), '(input_path, dir)\n', (2452, 2469), False, 'import os\n'), ((2490, 2510), 'os.listdir', 'os.listdir', (['dir_path'], {}), '(dir_path)\n', (2500, 2510), False, 'import os\n'), ((1956, 2000), 'os.path.join', 'os.path.join', (['args.out_dir', 'f"""{base}.nii.gz"""'], {}), "(args.out_dir, f'{base}.nii.gz')\n", (1968, 2000), False, 'import os\n'), ((2165, 2180), 'numpy.asarray', 'np.asarray', (['img'], {}), '(img)\n', (2175, 2180), True, 'import numpy as np\n'), ((2204, 2230), 'nibabel.Nifti1Image', 'nib.Nifti1Image', (['img', 'None'], {}), '(img, None)\n', (2219, 2230), True, 'import nibabel as nib\n'), ((1918, 1944), 
'nibabel.Nifti1Image', 'nib.Nifti1Image', (['img', 'None'], {}), '(img, None)\n', (1933, 1944), True, 'import nibabel as nib\n'), ((2580, 2608), 'os.path.join', 'os.path.join', (['dir_path', 'file'], {}), '(dir_path, file)\n', (2592, 2608), False, 'import os\n'), ((2639, 2684), 'os.path.join', 'os.path.join', (['dir_path', "(file[:-4] + '.nii.gz')"], {}), "(dir_path, file[:-4] + '.nii.gz')\n", (2651, 2684), False, 'import os\n'), ((1658, 1672), 'PIL.Image.open', 'Image.open', (['fn'], {}), '(fn)\n', (1668, 1672), False, 'from PIL import Image\n')] |
from ipyleaflet import Map, basemaps, basemap_to_tiles
m = Map(
layers=(basemap_to_tiles(basemaps.NASAGIBS.ModisTerraTrueColorCR, "2017-04-08"), ),
center=(52.204793, 360.121558),
zoom=4
)
m
import ipyleaflet
import json
import pandas as pd
import os
import requests
from ipywidgets import link, FloatSlider
from branca.colormap import linear
def load_data(url, filename, file_type):
r = requests.get(url)
with open(filename, 'w') as f:
f.write(r.content.decode("utf-8"))
with open(filename, 'r') as f:
return file_type(f)
geo_json_data = load_data(
'https://raw.githubusercontent.com/jupyter-widgets/ipyleaflet/master/examples/us-states.json',
'us-states.json',
json.load)
unemployment = load_data(
'https://raw.githubusercontent.com/jupyter-widgets/ipyleaflet/master/examples/US_Unemployment_Oct2012.csv',
'US_Unemployment_Oct2012.csv',
pd.read_csv)
unemployment = dict(zip(unemployment['State'].tolist(), unemployment['Unemployment'].tolist()))
layer = ipyleaflet.Choropleth(
geo_data=geo_json_data,
choro_data=unemployment,
colormap=linear.YlOrRd_04,
border_color='black',
style={'fillOpacity': 0.8, 'dashArray': '5, 5'})
m = ipyleaflet.Map(center = (43,-100), zoom = 4)
m.add_layer(layer)
m
import seaborn as sns
sns.set(style="white")
# Load the example mpg dataset
mpg = sns.load_dataset("mpg")
# Plot miles per gallon against horsepower with other semantics
sns.relplot(x="horsepower", y="mpg", hue="origin", size="weight",
sizes=(40, 400), alpha=.5, palette="muted",
height=6, data=mpg)
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="whitegrid")
# Initialize the matplotlib figure
f, ax = plt.subplots(figsize=(6, 15))
# Load the example car crash dataset
crashes = sns.load_dataset("car_crashes").sort_values("total", ascending=False)
# Plot the total crashes
sns.set_color_codes("pastel")
sns.barplot(x="total", y="abbrev", data=crashes,
label="Total", color="b")
# Plot the crashes where alcohol was involved
sns.set_color_codes("muted")
sns.barplot(x="alcohol", y="abbrev", data=crashes,
label="Alcohol-involved", color="b")
# Add a legend and informative axis label
ax.legend(ncol=2, loc="lower right", frameon=True)
ax.set(xlim=(0, 24), ylabel="",
xlabel="Automobile collisions per billion miles")
sns.despine(left=True, bottom=True)
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(style="whitegrid")
rs = np.random.RandomState(365)
values = rs.randn(365, 4).cumsum(axis=0)
dates = pd.date_range("1 1 2016", periods=365, freq="D")
data = pd.DataFrame(values, dates, columns=["A", "B", "C", "D"])
data = data.rolling(7).mean()
sns.lineplot(data=data, palette="tab10", linewidth=2.5)
import numpy as np
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
sns.set(style="ticks")
# Create a dataset with many short random walks
rs = np.random.RandomState(4)
pos = rs.randint(-1, 2, (20, 5)).cumsum(axis=1)
pos -= pos[:, 0, np.newaxis]
step = np.tile(range(5), 20)
walk = np.repeat(range(20), 5)
df = pd.DataFrame(np.c_[pos.flat, step, walk],
columns=["position", "step", "walk"])
# Initialize a grid of plots with an Axes for each walk
grid = sns.FacetGrid(df, col="walk", hue="walk", palette="tab20c",
col_wrap=4, height=1.5)
# Draw a horizontal line to show the starting point
grid.map(plt.axhline, y=0, ls=":", c=".5")
# Draw a line plot to show the trajectory of each random walk
grid.map(plt.plot, "step", "position", marker="o")
# Adjust the tick positions and labels
grid.set(xticks=np.arange(5), yticks=[-3, 3],
xlim=(-.5, 4.5), ylim=(-3.5, 3.5))
# Adjust the arrangement of the plots
grid.fig.tight_layout(w_pad=1)
| [
"seaborn.set",
"ipyleaflet.basemap_to_tiles",
"seaborn.set_color_codes",
"seaborn.despine",
"numpy.arange",
"ipyleaflet.Choropleth",
"seaborn.load_dataset",
"pandas.date_range",
"requests.get",
"seaborn.lineplot",
"numpy.random.RandomState",
"pandas.DataFrame",
"seaborn.barplot",
"ipyleafl... | [((1031, 1208), 'ipyleaflet.Choropleth', 'ipyleaflet.Choropleth', ([], {'geo_data': 'geo_json_data', 'choro_data': 'unemployment', 'colormap': 'linear.YlOrRd_04', 'border_color': '"""black"""', 'style': "{'fillOpacity': 0.8, 'dashArray': '5, 5'}"}), "(geo_data=geo_json_data, choro_data=unemployment,\n colormap=linear.YlOrRd_04, border_color='black', style={'fillOpacity': \n 0.8, 'dashArray': '5, 5'})\n", (1052, 1208), False, 'import ipyleaflet\n'), ((1226, 1267), 'ipyleaflet.Map', 'ipyleaflet.Map', ([], {'center': '(43, -100)', 'zoom': '(4)'}), '(center=(43, -100), zoom=4)\n', (1240, 1267), False, 'import ipyleaflet\n'), ((1322, 1344), 'seaborn.set', 'sns.set', ([], {'style': '"""white"""'}), "(style='white')\n", (1329, 1344), True, 'import seaborn as sns\n'), ((1383, 1406), 'seaborn.load_dataset', 'sns.load_dataset', (['"""mpg"""'], {}), "('mpg')\n", (1399, 1406), True, 'import seaborn as sns\n'), ((1472, 1606), 'seaborn.relplot', 'sns.relplot', ([], {'x': '"""horsepower"""', 'y': '"""mpg"""', 'hue': '"""origin"""', 'size': '"""weight"""', 'sizes': '(40, 400)', 'alpha': '(0.5)', 'palette': '"""muted"""', 'height': '(6)', 'data': 'mpg'}), "(x='horsepower', y='mpg', hue='origin', size='weight', sizes=(40,\n 400), alpha=0.5, palette='muted', height=6, data=mpg)\n", (1483, 1606), True, 'import seaborn as sns\n'), ((1681, 1707), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (1688, 1707), True, 'import seaborn as sns\n'), ((1752, 1781), 'matplotlib.pyplot.subplots', 'plt.subplots', ([], {'figsize': '(6, 15)'}), '(figsize=(6, 15))\n', (1764, 1781), True, 'import matplotlib.pyplot as plt\n'), ((1926, 1955), 'seaborn.set_color_codes', 'sns.set_color_codes', (['"""pastel"""'], {}), "('pastel')\n", (1945, 1955), True, 'import seaborn as sns\n'), ((1956, 2030), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""total"""', 'y': '"""abbrev"""', 'data': 'crashes', 'label': '"""Total"""', 'color': '"""b"""'}), "(x='total', 
y='abbrev', data=crashes, label='Total', color='b')\n", (1967, 2030), True, 'import seaborn as sns\n'), ((2090, 2118), 'seaborn.set_color_codes', 'sns.set_color_codes', (['"""muted"""'], {}), "('muted')\n", (2109, 2118), True, 'import seaborn as sns\n'), ((2119, 2210), 'seaborn.barplot', 'sns.barplot', ([], {'x': '"""alcohol"""', 'y': '"""abbrev"""', 'data': 'crashes', 'label': '"""Alcohol-involved"""', 'color': '"""b"""'}), "(x='alcohol', y='abbrev', data=crashes, label='Alcohol-involved',\n color='b')\n", (2130, 2210), True, 'import seaborn as sns\n'), ((2402, 2437), 'seaborn.despine', 'sns.despine', ([], {'left': '(True)', 'bottom': '(True)'}), '(left=True, bottom=True)\n', (2413, 2437), True, 'import seaborn as sns\n'), ((2506, 2532), 'seaborn.set', 'sns.set', ([], {'style': '"""whitegrid"""'}), "(style='whitegrid')\n", (2513, 2532), True, 'import seaborn as sns\n'), ((2539, 2565), 'numpy.random.RandomState', 'np.random.RandomState', (['(365)'], {}), '(365)\n', (2560, 2565), True, 'import numpy as np\n'), ((2615, 2663), 'pandas.date_range', 'pd.date_range', (['"""1 1 2016"""'], {'periods': '(365)', 'freq': '"""D"""'}), "('1 1 2016', periods=365, freq='D')\n", (2628, 2663), True, 'import pandas as pd\n'), ((2671, 2728), 'pandas.DataFrame', 'pd.DataFrame', (['values', 'dates'], {'columns': "['A', 'B', 'C', 'D']"}), "(values, dates, columns=['A', 'B', 'C', 'D'])\n", (2683, 2728), True, 'import pandas as pd\n'), ((2760, 2815), 'seaborn.lineplot', 'sns.lineplot', ([], {'data': 'data', 'palette': '"""tab10"""', 'linewidth': '(2.5)'}), "(data=data, palette='tab10', linewidth=2.5)\n", (2772, 2815), True, 'import seaborn as sns\n'), ((2917, 2939), 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""'}), "(style='ticks')\n", (2924, 2939), True, 'import seaborn as sns\n'), ((2994, 3018), 'numpy.random.RandomState', 'np.random.RandomState', (['(4)'], {}), '(4)\n', (3015, 3018), True, 'import numpy as np\n'), ((3161, 3240), 'pandas.DataFrame', 'pd.DataFrame', 
(['np.c_[pos.flat, step, walk]'], {'columns': "['position', 'step', 'walk']"}), "(np.c_[pos.flat, step, walk], columns=['position', 'step', 'walk'])\n", (3173, 3240), True, 'import pandas as pd\n'), ((3323, 3410), 'seaborn.FacetGrid', 'sns.FacetGrid', (['df'], {'col': '"""walk"""', 'hue': '"""walk"""', 'palette': '"""tab20c"""', 'col_wrap': '(4)', 'height': '(1.5)'}), "(df, col='walk', hue='walk', palette='tab20c', col_wrap=4,\n height=1.5)\n", (3336, 3410), True, 'import seaborn as sns\n'), ((408, 425), 'requests.get', 'requests.get', (['url'], {}), '(url)\n', (420, 425), False, 'import requests\n'), ((1830, 1861), 'seaborn.load_dataset', 'sns.load_dataset', (['"""car_crashes"""'], {}), "('car_crashes')\n", (1846, 1861), True, 'import seaborn as sns\n'), ((3694, 3706), 'numpy.arange', 'np.arange', (['(5)'], {}), '(5)\n', (3703, 3706), True, 'import numpy as np\n'), ((77, 148), 'ipyleaflet.basemap_to_tiles', 'basemap_to_tiles', (['basemaps.NASAGIBS.ModisTerraTrueColorCR', '"""2017-04-08"""'], {}), "(basemaps.NASAGIBS.ModisTerraTrueColorCR, '2017-04-08')\n", (93, 148), False, 'from ipyleaflet import Map, basemaps, basemap_to_tiles\n')] |
# pylint: disable=redefined-outer-name
"""Global configuration."""
import os
import shutil
import pytest
@pytest.fixture(scope="session")
def workspace_folder(tmpdir_factory):
"""Path to pytest workspace directory."""
path = str(tmpdir_factory.mktemp("workspace"))
yield path
shutil.rmtree(path)
@pytest.fixture(scope="session")
def global_setup(workspace_folder):
"""Global configuration setup."""
del workspace_folder
@pytest.fixture(autouse=True)
def workspace(global_setup, workspace_folder, doctest_namespace):
"""Folder to work from for each test."""
del global_setup # to please the pylint Gods.
# give access to expected modules in all doctest:
import numpy
doctest_namespace["numpy"] = numpy
import scipy
doctest_namespace["scipy"] = scipy
import chaospy
doctest_namespace["chaospy"] = chaospy
# fix random seeds:
from numpy.random import seed
seed(1000)
# change to workspace for the duration of test:
curdir = os.path.abspath(os.path.curdir)
os.chdir(workspace_folder)
yield workspace_folder
os.chdir(curdir)
| [
"os.chdir",
"numpy.random.seed",
"shutil.rmtree",
"pytest.fixture",
"os.path.abspath"
] | [((109, 140), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (123, 140), False, 'import pytest\n'), ((318, 349), 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), "(scope='session')\n", (332, 349), False, 'import pytest\n'), ((452, 480), 'pytest.fixture', 'pytest.fixture', ([], {'autouse': '(True)'}), '(autouse=True)\n', (466, 480), False, 'import pytest\n'), ((295, 314), 'shutil.rmtree', 'shutil.rmtree', (['path'], {}), '(path)\n', (308, 314), False, 'import shutil\n'), ((934, 944), 'numpy.random.seed', 'seed', (['(1000)'], {}), '(1000)\n', (938, 944), False, 'from numpy.random import seed\n'), ((1011, 1042), 'os.path.abspath', 'os.path.abspath', (['os.path.curdir'], {}), '(os.path.curdir)\n', (1026, 1042), False, 'import os\n'), ((1047, 1073), 'os.chdir', 'os.chdir', (['workspace_folder'], {}), '(workspace_folder)\n', (1055, 1073), False, 'import os\n'), ((1105, 1121), 'os.chdir', 'os.chdir', (['curdir'], {}), '(curdir)\n', (1113, 1121), False, 'import os\n')] |
# Classify images, based on training data
#
# Usage:
# 1. create folder with:
# - folder with training data (one folder for each type)
# - folder with images to be classified
# - this script
# 3. set required parameters:
# - data_dir = (relative) folder with traing/validation images ('document_images')
# - epoch = number of passes of the entire training dataset in the machine learning algorithm ('10')
# - path = (relative) folder with images that need to be predicted ('test')
# 3. in terminal: '$ python document_classifier_keras.py -d data_dir -p path [-e 10] '
# 4. results are written to csv file 'predicted_image_types.csv'
# see https://www.tensorflow.org/tutorials/images/classification
import matplotlib.pyplot as plt
import numpy as np
import os
import PIL
import tensorflow as tf
import pathlib
import argparse
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
# construct the argument parse and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-d", "--data_dir", default="document_images",
help="path to traing images")
ap.add_argument("-p", "--path", default="path",
help="path to input images")
ap.add_argument("-e", "--epoch", default="10", type=int,
help="number of epochs")
args = vars(ap.parse_args())
path = args["path"]
data_dir = args["data_dir"]
epoch = args["epoch"]
data_dir = pathlib.Path(data_dir)
subfolders = os.listdir(data_dir)
num_classes = len(subfolders)
# Check if files are valif jpg
print("Reading and checking files from subfolders: ", subfolders, " in ", data_dir)
print("no. of subfolders: ",num_classes)
# Filter out corrupted images
# Change folder names accordingly
num_skipped = 0
for folder_name in subfolders:
folder_path = os.path.join(data_dir, folder_name)
for fname in os.listdir(folder_path):
fpath = os.path.join(folder_path, fname)
try:
fobj = open(fpath, "rb")
is_jfif = tf.compat.as_bytes("JFIF") in fobj.peek(10)
finally:
fobj.close()
if not is_jfif:
num_skipped += 1
# Delete corrupted image
os.remove(fpath)
print("- Deleted file ", fpath)
print("Deleted %d images" % num_skipped)
# list no. of files
image_count = len(list(data_dir.glob('*/*.jpg')))
print("Total no of images: ", image_count)
# Create a dataset
# Define some parameters for the loader
batch_size = 32
img_height = 180
img_width = 180
# Create a validation split: 80% of the images for training, and 20% for validation.
train_ds = tf.keras.utils.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
val_ds = tf.keras.utils.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(img_height, img_width),
batch_size=batch_size)
class_names = train_ds.class_names
print("class_names: ", class_names)
# Configure the dataset for performance
AUTOTUNE = tf.data.AUTOTUNE
train_ds = train_ds.cache().shuffle(1000).prefetch(buffer_size=AUTOTUNE)
val_ds = val_ds.cache().prefetch(buffer_size=AUTOTUNE)
# Standardize the data
# Create the model
model = Sequential([
layers.Rescaling(1./255, input_shape=(img_height, img_width, 3)),
layers.Conv2D(16, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(32, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Conv2D(64, 3, padding='same', activation='relu'),
layers.MaxPooling2D(),
layers.Flatten(),
layers.Dense(128, activation='relu'),
layers.Dense(num_classes)
])
# Compile the model
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.summary()
# Train the model
epochs=15
history = model.fit(
train_ds,
validation_data=val_ds,
epochs=epochs
)
# Visualize training results
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(8, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# No optimization necessary; check tutorial if it is (eg. solve overfitting)
# Predict on new data
path = "test"
files = os.listdir(path)
# Create csv with predictions
csv = open('predicted_image_types.csv','w')
for f in files:
f = path+'/'+f
img = keras.preprocessing.image.load_img(
f, target_size=(img_height, img_width)
)
img_array = tf.keras.utils.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = model.predict(img_array)
score = tf.nn.softmax(predictions[0])
print(
"Image {} most likely belongs to {} with a {:.2f} percent confidence."
.format(f, class_names[np.argmax(score)], 100 * np.max(score))
)
# write result per image
csv.write(str(f))
csv.write(";")
csv.write(class_names[np.argmax(score)])
csv.write(";")
csv.write(str(100 * np.max(score)))
csv.write("\n")
print("Done. Processed all ", image_count, " images")
| [
"tensorflow.keras.layers.Dense",
"tensorflow.nn.softmax",
"tensorflow.compat.as_bytes",
"os.remove",
"os.listdir",
"tensorflow.keras.layers.Conv2D",
"argparse.ArgumentParser",
"pathlib.Path",
"matplotlib.pyplot.plot",
"numpy.max",
"tensorflow.keras.utils.img_to_array",
"tensorflow.keras.prepro... | [((1019, 1044), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (1042, 1044), False, 'import argparse\n'), ((1412, 1434), 'pathlib.Path', 'pathlib.Path', (['data_dir'], {}), '(data_dir)\n', (1424, 1434), False, 'import pathlib\n'), ((1448, 1468), 'os.listdir', 'os.listdir', (['data_dir'], {}), '(data_dir)\n', (1458, 1468), False, 'import os\n'), ((2597, 2768), 'tensorflow.keras.utils.image_dataset_from_directory', 'tf.keras.utils.image_dataset_from_directory', (['data_dir'], {'validation_split': '(0.2)', 'subset': '"""training"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), "(data_dir, validation_split=0.2,\n subset='training', seed=123, image_size=(img_height, img_width),\n batch_size=batch_size)\n", (2640, 2768), True, 'import tensorflow as tf\n'), ((2784, 2957), 'tensorflow.keras.utils.image_dataset_from_directory', 'tf.keras.utils.image_dataset_from_directory', (['data_dir'], {'validation_split': '(0.2)', 'subset': '"""validation"""', 'seed': '(123)', 'image_size': '(img_height, img_width)', 'batch_size': 'batch_size'}), "(data_dir, validation_split=0.2,\n subset='validation', seed=123, image_size=(img_height, img_width),\n batch_size=batch_size)\n", (2827, 2957), True, 'import tensorflow as tf\n'), ((4213, 4239), 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), '(figsize=(8, 8))\n', (4223, 4239), True, 'import matplotlib.pyplot as plt\n'), ((4240, 4260), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), '(1, 2, 1)\n', (4251, 4260), True, 'import matplotlib.pyplot as plt\n'), ((4261, 4315), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), "(epochs_range, acc, label='Training Accuracy')\n", (4269, 4315), True, 'import matplotlib.pyplot as plt\n'), ((4316, 4376), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation 
Accuracy"""'}), "(epochs_range, val_acc, label='Validation Accuracy')\n", (4324, 4376), True, 'import matplotlib.pyplot as plt\n'), ((4377, 4406), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), "(loc='lower right')\n", (4387, 4406), True, 'import matplotlib.pyplot as plt\n'), ((4407, 4452), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), "('Training and Validation Accuracy')\n", (4416, 4452), True, 'import matplotlib.pyplot as plt\n'), ((4454, 4474), 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), '(1, 2, 2)\n', (4465, 4474), True, 'import matplotlib.pyplot as plt\n'), ((4475, 4526), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), "(epochs_range, loss, label='Training Loss')\n", (4483, 4526), True, 'import matplotlib.pyplot as plt\n'), ((4527, 4584), 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), "(epochs_range, val_loss, label='Validation Loss')\n", (4535, 4584), True, 'import matplotlib.pyplot as plt\n'), ((4585, 4614), 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), "(loc='upper right')\n", (4595, 4614), True, 'import matplotlib.pyplot as plt\n'), ((4615, 4656), 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), "('Training and Validation Loss')\n", (4624, 4656), True, 'import matplotlib.pyplot as plt\n'), ((4657, 4667), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4665, 4667), True, 'import matplotlib.pyplot as plt\n'), ((4791, 4807), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4801, 4807), False, 'import os\n'), ((1786, 1821), 'os.path.join', 'os.path.join', (['data_dir', 'folder_name'], {}), '(data_dir, folder_name)\n', (1798, 1821), False, 'import os\n'), ((1839, 1862), 'os.listdir', 'os.listdir', (['folder_path'], {}), '(folder_path)\n', (1849, 1862), False, 
'import os\n'), ((4924, 4998), 'tensorflow.keras.preprocessing.image.load_img', 'keras.preprocessing.image.load_img', (['f'], {'target_size': '(img_height, img_width)'}), '(f, target_size=(img_height, img_width))\n', (4958, 4998), False, 'from tensorflow import keras\n'), ((5020, 5052), 'tensorflow.keras.utils.img_to_array', 'tf.keras.utils.img_to_array', (['img'], {}), '(img)\n', (5047, 5052), True, 'import tensorflow as tf\n'), ((5066, 5094), 'tensorflow.expand_dims', 'tf.expand_dims', (['img_array', '(0)'], {}), '(img_array, 0)\n', (5080, 5094), True, 'import tensorflow as tf\n'), ((5162, 5191), 'tensorflow.nn.softmax', 'tf.nn.softmax', (['predictions[0]'], {}), '(predictions[0])\n', (5175, 5191), True, 'import tensorflow as tf\n'), ((1880, 1912), 'os.path.join', 'os.path.join', (['folder_path', 'fname'], {}), '(folder_path, fname)\n', (1892, 1912), False, 'import os\n'), ((3300, 3367), 'tensorflow.keras.layers.Rescaling', 'layers.Rescaling', (['(1.0 / 255)'], {'input_shape': '(img_height, img_width, 3)'}), '(1.0 / 255, input_shape=(img_height, img_width, 3))\n', (3316, 3367), False, 'from tensorflow.keras import layers\n'), ((3368, 3423), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(16)', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(16, 3, padding='same', activation='relu')\n", (3381, 3423), False, 'from tensorflow.keras import layers\n'), ((3427, 3448), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (3446, 3448), False, 'from tensorflow.keras import layers\n'), ((3452, 3507), 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(32)', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(32, 3, padding='same', activation='relu')\n", (3465, 3507), False, 'from tensorflow.keras import layers\n'), ((3511, 3532), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (3530, 3532), False, 'from tensorflow.keras import layers\n'), ((3536, 3591), 
'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(64)', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), "(64, 3, padding='same', activation='relu')\n", (3549, 3591), False, 'from tensorflow.keras import layers\n'), ((3595, 3616), 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {}), '()\n', (3614, 3616), False, 'from tensorflow.keras import layers\n'), ((3620, 3636), 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), '()\n', (3634, 3636), False, 'from tensorflow.keras import layers\n'), ((3640, 3676), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128)'], {'activation': '"""relu"""'}), "(128, activation='relu')\n", (3652, 3676), False, 'from tensorflow.keras import layers\n'), ((3680, 3705), 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_classes'], {}), '(num_classes)\n', (3692, 3705), False, 'from tensorflow.keras import layers\n'), ((3781, 3844), 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), '(from_logits=True)\n', (3826, 3844), True, 'import tensorflow as tf\n'), ((2174, 2190), 'os.remove', 'os.remove', (['fpath'], {}), '(fpath)\n', (2183, 2190), False, 'import os\n'), ((5433, 5449), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (5442, 5449), True, 'import numpy as np\n'), ((1985, 2011), 'tensorflow.compat.as_bytes', 'tf.compat.as_bytes', (['"""JFIF"""'], {}), "('JFIF')\n", (2003, 2011), True, 'import tensorflow as tf\n'), ((5305, 5321), 'numpy.argmax', 'np.argmax', (['score'], {}), '(score)\n', (5314, 5321), True, 'import numpy as np\n'), ((5330, 5343), 'numpy.max', 'np.max', (['score'], {}), '(score)\n', (5336, 5343), True, 'import numpy as np\n'), ((5489, 5502), 'numpy.max', 'np.max', (['score'], {}), '(score)\n', (5495, 5502), True, 'import numpy as np\n')] |
from dataprocessing import create_feature_sets_and_labels
import tensorflow as tf
import pickle
import numpy as np
train_x,train_y,test_x,test_y = create_feature_sets_and_labels('pos.txt','neg.txt')
n_nodes_hl1 = 1500
n_nodes_hl2 = 1500
n_nodes_hl3 = 1500
n_classes = 2
batch_size = 100
hm_epochs = 10
x = tf.placeholder('float')
y = tf.placeholder('float')
hidden_1_layer = {'f_fum':n_nodes_hl1,
'weight':tf.Variable(tf.random_normal([len(train_x[0]), n_nodes_hl1])),
'bias':tf.Variable(tf.random_normal([n_nodes_hl1]))}
hidden_2_layer = {'f_fum':n_nodes_hl2,
'weight':tf.Variable(tf.random_normal([n_nodes_hl1, n_nodes_hl2])),
'bias':tf.Variable(tf.random_normal([n_nodes_hl2]))}
hidden_3_layer = {'f_fum':n_nodes_hl3,
'weight':tf.Variable(tf.random_normal([n_nodes_hl2, n_nodes_hl3])),
'bias':tf.Variable(tf.random_normal([n_nodes_hl3]))}
output_layer = {'f_fum':None,
'weight':tf.Variable(tf.random_normal([n_nodes_hl3, n_classes])),
'bias':tf.Variable(tf.random_normal([n_classes])),}
# Nothing changes
def neural_network_model(data):
l1 = tf.add(tf.matmul(data,hidden_1_layer['weight']), hidden_1_layer['bias'])
l1 = tf.nn.relu(l1)
l2 = tf.add(tf.matmul(l1,hidden_2_layer['weight']), hidden_2_layer['bias'])
l2 = tf.nn.relu(l2)
l3 = tf.add(tf.matmul(l2,hidden_3_layer['weight']), hidden_3_layer['bias'])
l3 = tf.nn.relu(l3)
output = tf.matmul(l3,output_layer['weight']) + output_layer['bias']
return output
def train_neural_network(x):
prediction = neural_network_model(x)
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(logits = prediction, labels = y))
optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(cost)
with tf.Session() as sess:
sess.run(tf.initialize_all_variables())
for epoch in range(hm_epochs):
epoch_loss = 0
i=0
while i < len(train_x):
start = i
end = i+batch_size
batch_x = np.array(train_x[start:end])
batch_y = np.array(train_y[start:end])
_, c = sess.run([optimizer, cost], feed_dict={x: batch_x,y: batch_y})
epoch_loss += c
i+=batch_size
print('Epoch', epoch+1, 'completed out of',hm_epochs,'loss:',epoch_loss)
correct = tf.equal(tf.argmax(prediction, 1), tf.argmax(y, 1))
accuracy = tf.reduce_mean(tf.cast(correct, 'float'))
print('Accuracy:',accuracy.eval({x:test_x, y:test_y}))
train_neural_network(x) | [
"tensorflow.initialize_all_variables",
"tensorflow.random_normal",
"tensorflow.nn.relu",
"dataprocessing.create_feature_sets_and_labels",
"tensorflow.placeholder",
"tensorflow.Session",
"tensorflow.nn.softmax_cross_entropy_with_logits_v2",
"numpy.array",
"tensorflow.argmax",
"tensorflow.matmul",
... | [((155, 207), 'dataprocessing.create_feature_sets_and_labels', 'create_feature_sets_and_labels', (['"""pos.txt"""', '"""neg.txt"""'], {}), "('pos.txt', 'neg.txt')\n", (185, 207), False, 'from dataprocessing import create_feature_sets_and_labels\n'), ((327, 350), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (341, 350), True, 'import tensorflow as tf\n'), ((356, 379), 'tensorflow.placeholder', 'tf.placeholder', (['"""float"""'], {}), "('float')\n", (370, 379), True, 'import tensorflow as tf\n'), ((1323, 1337), 'tensorflow.nn.relu', 'tf.nn.relu', (['l1'], {}), '(l1)\n', (1333, 1337), True, 'import tensorflow as tf\n'), ((1431, 1445), 'tensorflow.nn.relu', 'tf.nn.relu', (['l2'], {}), '(l2)\n', (1441, 1445), True, 'import tensorflow as tf\n'), ((1539, 1553), 'tensorflow.nn.relu', 'tf.nn.relu', (['l3'], {}), '(l3)\n', (1549, 1553), True, 'import tensorflow as tf\n'), ((551, 582), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl1]'], {}), '([n_nodes_hl1])\n', (567, 582), True, 'import tensorflow as tf\n'), ((667, 711), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl1, n_nodes_hl2]'], {}), '([n_nodes_hl1, n_nodes_hl2])\n', (683, 711), True, 'import tensorflow as tf\n'), ((752, 783), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl2]'], {}), '([n_nodes_hl2])\n', (768, 783), True, 'import tensorflow as tf\n'), ((868, 912), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl2, n_nodes_hl3]'], {}), '([n_nodes_hl2, n_nodes_hl3])\n', (884, 912), True, 'import tensorflow as tf\n'), ((953, 984), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl3]'], {}), '([n_nodes_hl3])\n', (969, 984), True, 'import tensorflow as tf\n'), ((1058, 1100), 'tensorflow.random_normal', 'tf.random_normal', (['[n_nodes_hl3, n_classes]'], {}), '([n_nodes_hl3, n_classes])\n', (1074, 1100), True, 'import tensorflow as tf\n'), ((1139, 1168), 'tensorflow.random_normal', 'tf.random_normal', 
(['[n_classes]'], {}), '([n_classes])\n', (1155, 1168), True, 'import tensorflow as tf\n'), ((1247, 1288), 'tensorflow.matmul', 'tf.matmul', (['data', "hidden_1_layer['weight']"], {}), "(data, hidden_1_layer['weight'])\n", (1256, 1288), True, 'import tensorflow as tf\n'), ((1357, 1396), 'tensorflow.matmul', 'tf.matmul', (['l1', "hidden_2_layer['weight']"], {}), "(l1, hidden_2_layer['weight'])\n", (1366, 1396), True, 'import tensorflow as tf\n'), ((1465, 1504), 'tensorflow.matmul', 'tf.matmul', (['l2', "hidden_3_layer['weight']"], {}), "(l2, hidden_3_layer['weight'])\n", (1474, 1504), True, 'import tensorflow as tf\n'), ((1570, 1607), 'tensorflow.matmul', 'tf.matmul', (['l3', "output_layer['weight']"], {}), "(l3, output_layer['weight'])\n", (1579, 1607), True, 'import tensorflow as tf\n'), ((1746, 1817), 'tensorflow.nn.softmax_cross_entropy_with_logits_v2', 'tf.nn.softmax_cross_entropy_with_logits_v2', ([], {'logits': 'prediction', 'labels': 'y'}), '(logits=prediction, labels=y)\n', (1788, 1817), True, 'import tensorflow as tf\n'), ((1911, 1923), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (1921, 1923), True, 'import tensorflow as tf\n'), ((1837, 1880), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (1859, 1880), True, 'import tensorflow as tf\n'), ((1945, 1974), 'tensorflow.initialize_all_variables', 'tf.initialize_all_variables', ([], {}), '()\n', (1972, 1974), True, 'import tensorflow as tf\n'), ((2421, 2445), 'tensorflow.argmax', 'tf.argmax', (['prediction', '(1)'], {}), '(prediction, 1)\n', (2430, 2445), True, 'import tensorflow as tf\n'), ((2447, 2462), 'tensorflow.argmax', 'tf.argmax', (['y', '(1)'], {}), '(y, 1)\n', (2456, 2462), True, 'import tensorflow as tf\n'), ((2493, 2518), 'tensorflow.cast', 'tf.cast', (['correct', '"""float"""'], {}), "(correct, 'float')\n", (2500, 2518), True, 'import tensorflow as tf\n'), ((2126, 2154), 'numpy.array', 'np.array', 
(['train_x[start:end]'], {}), '(train_x[start:end])\n', (2134, 2154), True, 'import numpy as np\n'), ((2170, 2198), 'numpy.array', 'np.array', (['train_y[start:end]'], {}), '(train_y[start:end])\n', (2178, 2198), True, 'import numpy as np\n')] |
import numpy
import tqdm
from sklearn.manifold import TSNE
class _Transform:
    """Thin base wrapper around a dimensionality-reduction transform.

    Subclasses are expected to assign ``self.transform`` an object that
    exposes a scikit-learn style ``fit_transform`` method.
    """

    def __init__(self):
        pass

    def fit(self, X):
        # Delegate to the wrapped transform and return the reduced data.
        reducer = self.transform
        return reducer.fit_transform(X)
class tSNE(_Transform):
    """2-D t-SNE reduction with fixed hyperparameters."""

    def __init__(self):
        super().__init__()
        self.transform = TSNE(
            n_components=2,
            verbose=1,
            perplexity=40,
            n_iter=300,
        )
class Visualizer(object):
    """Project word/character embeddings to 2D with t-SNE and render an
    interactive HTML scatter plot whose hover tooltips show the source
    context of each point.
    """

    # NOTE(review): "emeddings" is a typo for "embeddings" in both public
    # method names; kept because external callers may rely on them.
    def visualize_word_emeddings(self, embeddings, sentences, output_file):
        """Embed every token, reduce to 2D, and write an HTML plot."""
        X = self.prepare_word_embeddings(embeddings, sentences)
        contexts = self.word_contexts(sentences)
        trans_ = tSNE()
        reduced = trans_.fit(X)
        self.visualize(reduced, contexts, output_file)

    def visualize_char_emeddings(self, embeddings, sentences, output_file):
        """Embed every character, reduce to 2D, and write an HTML plot."""
        X = self.prepare_char_embeddings(embeddings, sentences)
        contexts = self.char_contexts(sentences)
        trans_ = tSNE()
        reduced = trans_.fit(X)
        self.visualize(reduced, contexts, output_file)

    @staticmethod
    def prepare_word_embeddings(embeddings, sentences):
        """Return an (n_tokens, dim) array of token embeddings.

        Assumes a flair-style API where ``embeddings.embed(sentence)``
        populates each token's ``.embedding`` tensor -- TODO confirm.
        """
        X = []
        for sentence in tqdm.tqdm(sentences):
            embeddings.embed(sentence)
            for i, token in enumerate(sentence):
                # [None, :] adds a leading axis so rows can be concatenated.
                X.append(token.embedding.detach().numpy()[None, :])
        X = numpy.concatenate(X, 0)
        return X

    @staticmethod
    def word_contexts(sentences):
        """Build one HTML snippet per token: the token in red with up to
        4 words of context on either side."""
        contexts = []
        for sentence in sentences:
            strs = [x.text for x in sentence.tokens]
            for i, token in enumerate(strs):
                prop = '<b><font color="red"> {token} </font></b>'.format(token=token)
                prop = " ".join(strs[max(i - 4, 0) : i]) + prop
                prop = prop + " ".join(strs[i + 1 : min(len(strs), i + 5)])
                contexts.append("<p>" + prop + "</p>")
        return contexts

    @staticmethod
    def prepare_char_embeddings(embeddings, sentences):
        """Return an (n_chars, dim) array of character-level hidden states
        from the embeddings' language model (``embeddings.lm``)."""
        X = []
        for sentence in tqdm.tqdm(sentences):
            sentence = " ".join([x.text for x in sentence])
            hidden = embeddings.lm.get_representation([sentence])
            X.append(hidden.squeeze().detach().numpy())
        X = numpy.concatenate(X, 0)
        return X

    @staticmethod
    def char_contexts(sentences):
        """Build one HTML snippet per character: the character highlighted
        with up to ~30 characters of context on either side."""
        contexts = []
        for sentence in sentences:
            sentence = " ".join([token.text for token in sentence])
            for i, char in enumerate(sentence):
                context = '<span style="background-color: yellow"><b>{}</b></span>'.format(
                    char
                )
                context = "".join(sentence[max(i - 30, 0) : i]) + context
                context = context + "".join(
                    sentence[i + 1 : min(len(sentence), i + 30)]
                )
                contexts.append(context)
        return contexts

    @staticmethod
    def visualize(X, contexts, file):
        """Scatter-plot the 2D points in X and save an interactive HTML file
        whose hover tooltips show the corresponding entry of ``contexts``."""
        # Imported lazily so the plotting stack is only required here.
        import matplotlib.pyplot
        import mpld3

        fig, ax = matplotlib.pyplot.subplots()
        ax.grid(True, alpha=0.3)
        points = ax.plot(
            X[:, 0], X[:, 1], "o", color="b", mec="k", ms=5, mew=1, alpha=0.6
        )
        ax.set_xlabel("x")
        ax.set_ylabel("y")
        ax.set_title("Hover mouse to reveal context", size=20)
        tooltip = mpld3.plugins.PointHTMLTooltip(
            points[0], contexts, voffset=10, hoffset=10
        )
        mpld3.plugins.connect(fig, tooltip)
        mpld3.save_html(fig, file)
| [
"mpld3.plugins.connect",
"tqdm.tqdm",
"sklearn.manifold.TSNE",
"mpld3.save_html",
"mpld3.plugins.PointHTMLTooltip",
"numpy.concatenate"
] | [((289, 347), 'sklearn.manifold.TSNE', 'TSNE', ([], {'n_components': '(2)', 'verbose': '(1)', 'perplexity': '(40)', 'n_iter': '(300)'}), '(n_components=2, verbose=1, perplexity=40, n_iter=300)\n', (293, 347), False, 'from sklearn.manifold import TSNE\n'), ((1096, 1116), 'tqdm.tqdm', 'tqdm.tqdm', (['sentences'], {}), '(sentences)\n', (1105, 1116), False, 'import tqdm\n'), ((1288, 1311), 'numpy.concatenate', 'numpy.concatenate', (['X', '(0)'], {}), '(X, 0)\n', (1305, 1311), False, 'import numpy\n'), ((1965, 1985), 'tqdm.tqdm', 'tqdm.tqdm', (['sentences'], {}), '(sentences)\n', (1974, 1985), False, 'import tqdm\n'), ((2183, 2206), 'numpy.concatenate', 'numpy.concatenate', (['X', '(0)'], {}), '(X, 0)\n', (2200, 2206), False, 'import numpy\n'), ((3302, 3377), 'mpld3.plugins.PointHTMLTooltip', 'mpld3.plugins.PointHTMLTooltip', (['points[0]', 'contexts'], {'voffset': '(10)', 'hoffset': '(10)'}), '(points[0], contexts, voffset=10, hoffset=10)\n', (3332, 3377), False, 'import mpld3\n'), ((3409, 3444), 'mpld3.plugins.connect', 'mpld3.plugins.connect', (['fig', 'tooltip'], {}), '(fig, tooltip)\n', (3430, 3444), False, 'import mpld3\n'), ((3454, 3480), 'mpld3.save_html', 'mpld3.save_html', (['fig', 'file'], {}), '(fig, file)\n', (3469, 3480), False, 'import mpld3\n')] |
import os, datetime, gc, warnings, glob
from natsort import natsorted
import numpy as np
import cv2
import tifffile
import logging
from .. import utils, plot, transforms
from ..io import imread, imsave, outlines_to_text
import omnipose
try:
from PyQt5.QtWidgets import QFileDialog
GUI = True
except:
GUI = False
try:
import matplotlib.pyplot as plt
MATPLOTLIB = True
except:
MATPLOTLIB = False
# When True, masks shown in the GUI are relabeled to N colors
# (omnipose.utils.ncolorlabel) instead of one color per unique label.
NCOLOR = False
# WIP to make GUI use N-color masks. Tricky thing is that only the display should be
# reduced to N colors; selection and editing should act on unique labels.
def _load_image(parent, filename=None):
    """ load image with filename; if None, open QFileDialog """
    # parent is the GUI main-window object; it carries all viewer state.
    if filename is None:
        name = QFileDialog.getOpenFileName(
            parent, "Load image"
            )
        filename = name[0]
    # If a saved segmentation sits next to the image, load that instead.
    manual_file = os.path.splitext(filename)[0]+'_seg.npy'
    if os.path.isfile(manual_file):
        print(manual_file)
        _load_seg(parent, manual_file, image=imread(filename), image_file=filename)
        return
    elif os.path.isfile(os.path.splitext(filename)[0]+'_manual.npy'):
        # legacy filename for manually edited segmentations
        manual_file = os.path.splitext(filename)[0]+'_manual.npy'
        _load_seg(parent, manual_file, image=imread(filename), image_file=filename)
        return
    try:
        image = imread(filename)
        parent.loaded = True
    except:
        # NOTE(review): bare except keeps the GUI alive on unreadable files;
        # parent.loaded then retains its previous value.
        print('images not compatible')
    if parent.loaded:
        parent.reset()
        parent.filename = filename
        print(filename)
        filename = os.path.split(parent.filename)[-1]
        _initialize_images(parent, image, resize=parent.resize, X2=0)
        parent.clear_all()
        parent.loaded = True
        parent.enable_buttons()
        # presumably disabled until flows are recomputed -- verify
        parent.threshslider.setEnabled(False)
        parent.probslider.setEnabled(False)
def _initialize_images(parent, image, resize, X2):
    """ format image for GUI """
    # Normalizes arbitrary 2D/3D/4D input into parent.stack with shape
    # (Z, Ly, Lx, 3), rescales intensities to 0-255 when needed, and
    # initializes the viewer (layers, saturation, scale, current plane).
    parent.onechan=False
    if image.ndim > 3:
        # make tiff Z x channels x W x H
        if image.shape[0]<4:
            # tiff is channels x Z x W x H
            image = np.transpose(image, (1,0,2,3))
        elif image.shape[-1]<4:
            # tiff is Z x W x H x channels
            image = np.transpose(image, (0,3,1,2))
        # fill in with blank channels to make 3 channels
        if image.shape[1] < 3:
            shape = image.shape
            image = np.concatenate((image,
                            np.zeros((shape[0], 3-shape[1], shape[2], shape[3]), dtype=np.uint8)), axis=1)
            if 3-shape[1]>1:
                # more than one channel was padded -> effectively grayscale
                parent.onechan=True
        image = np.transpose(image, (0,2,3,1))
    elif image.ndim==3:
        if image.shape[0] < 5:
            # channels-first 2D image -> move channels last
            image = np.transpose(image, (1,2,0))
        if image.shape[-1] < 3:
            shape = image.shape
            # pad missing channels with zeros of the same scalar type
            image = np.concatenate((image,np.zeros((shape[0], shape[1], 3-shape[2]),dtype=type(image[0,0,0]))), axis=-1)
            if 3-shape[2]>1:
                parent.onechan=True
            image = image[np.newaxis,...]
        elif image.shape[-1]<5 and image.shape[-1]>2:
            # drop an alpha channel if present
            image = image[:,:,:3]
            image = image[np.newaxis,...]
    else:
        # plain 2D grayscale -> add a Z axis
        image = image[np.newaxis,...]
    parent.stack = image
    parent.NZ = len(parent.stack)
    parent.scroll.setMaximum(parent.NZ-1)
    # Rescale intensities to 0-255 for display when out of range or very dim.
    if parent.stack.max()>255 or parent.stack.min()<0.0 or parent.stack.max()<=50.0:
        parent.stack = parent.stack.astype(np.float32)
        parent.stack -= parent.stack.min()
        parent.stack /= parent.stack.max()
        parent.stack *= 255
    del image
    gc.collect()
    parent.stack = list(parent.stack)
    for k,img in enumerate(parent.stack):
        # if grayscale make 3D
        if resize != -1:
            img = transforms._image_resizer(img, resize=resize, to_uint8=False)
        if img.ndim==2:
            img = np.tile(img[:,:,np.newaxis], (1,1,3))
            parent.onechan=True
        if X2!=0:
            img = transforms._X2zoom(img, X2=X2)
        parent.stack[k] = img
    parent.imask=0
    print(parent.NZ, parent.stack[0].shape)
    parent.Ly, parent.Lx = img.shape[0], img.shape[1]
    parent.stack = np.array(parent.stack)
    # RGBA overlay layer for mask/outline drawing, initially transparent.
    parent.layers = 0*np.ones((parent.NZ,parent.Ly,parent.Lx,4), np.uint8)
    if parent.autobtn.isChecked() or len(parent.saturation)!=parent.NZ:
        parent.compute_saturation()
    parent.compute_scale()
    # start the viewer at the middle plane of the stack
    parent.currentZ = int(np.floor(parent.NZ/2))
    parent.scroll.setValue(parent.currentZ)
    parent.zpos.setText(str(parent.currentZ))
def _load_seg(parent, filename=None, image=None, image_file=None):
    """ load *_seg.npy with filename; if None, open QFileDialog """
    # Restores a saved segmentation (masks, outlines, colors, flows, GUI
    # settings) into the viewer, locating the matching image if needed.
    if filename is None:
        name = QFileDialog.getOpenFileName(
            parent, "Load labelled data", filter="*.npy"
            )
        filename = name[0]
    try:
        dat = np.load(filename, allow_pickle=True).item()
        # accessing 'outlines' validates that this is a seg file
        dat['outlines']
        parent.loaded = True
    except:
        parent.loaded = False
        print('not NPY')
        return
    parent.reset()
    if image is None:
        # Try to recover the raw image: first the path stored in the npy,
        # then the same basename next to the npy file itself.
        found_image = False
        if 'filename' in dat:
            parent.filename = dat['filename']
            if os.path.isfile(parent.filename):
                parent.filename = dat['filename']
                found_image = True
            else:
                imgname = os.path.split(parent.filename)[1]
                root = os.path.split(filename)[0]
                parent.filename = root+'/'+imgname
                if os.path.isfile(parent.filename):
                    found_image = True
        if found_image:
            try:
                image = imread(parent.filename)
            except:
                parent.loaded = False
                found_image = False
                print('ERROR: cannot find image file, loading from npy')
        if not found_image:
            # strip the '_seg.npy' suffix (11 characters)
            parent.filename = filename[:-11]
            if 'img' in dat:
                image = dat['img']
            else:
                print('ERROR: no image file found and no image in npy')
                return
    else:
        parent.filename = image_file
    print(parent.filename)
    if 'X2' in dat:
        parent.X2 = dat['X2']
    else:
        parent.X2 = 0
    if 'resize' in dat:
        parent.resize = dat['resize']
    elif 'img' in dat:
        if max(image.shape) > max(dat['img'].shape):
            parent.resize = max(dat['img'].shape)
    else:
        parent.resize = -1
    _initialize_images(parent, image, resize=parent.resize, X2=parent.X2)
    if 'chan_choose' in dat:
        parent.ChannelChoose[0].setCurrentIndex(dat['chan_choose'][0])
        parent.ChannelChoose[1].setCurrentIndex(dat['chan_choose'][1])
    if 'outlines' in dat:
        if isinstance(dat['outlines'], list):
            # old way of saving files: a list of outline point sets,
            # stored in reverse order
            dat['outlines'] = dat['outlines'][::-1]
            for k, outline in enumerate(dat['outlines']):
                if 'colors' in dat:
                    color = dat['colors'][k]
                else:
                    col_rand = np.random.randint(1000)
                    color = parent.colormap[col_rand,:3]
                median = parent.add_mask(points=outline, color=color)
                if median is not None:
                    parent.cellcolors.append(color)
                    parent.ncells+=1
        else:
            # new way: dense label arrays; normalize to (Z, Ly, Lx) and
            # 1-based labels
            if dat['masks'].ndim==2:
                dat['masks'] = dat['masks'][np.newaxis,:,:]
                dat['outlines'] = dat['outlines'][np.newaxis,:,:]
            if dat['masks'].min()==-1:
                dat['masks'] += 1
                dat['outlines'] += 1
            if 'colors' in dat:
                colors = dat['colors']
            else:
                col_rand = np.random.randint(0, 1000, (dat['masks'].max(),))
                colors = parent.colormap[col_rand,:3]
            parent.cellpix = dat['masks']
            parent.outpix = dat['outlines']
            parent.cellcolors.extend(colors)
            parent.ncells = parent.cellpix.max()
            parent.draw_masks()
            if 'est_diam' in dat:
                parent.Diameter.setText('%0.1f'%dat['est_diam'])
                parent.diameter = dat['est_diam']
                parent.compute_scale()
            # NOTE(review): due to precedence this parses as
            # masksOn or (outlinesOn and not (masksOn and outlinesOn));
            # possibly an XOR was intended -- confirm before changing.
            if parent.masksOn or parent.outlinesOn and not (parent.masksOn and parent.outlinesOn):
                parent.redraw_masks(masks=parent.masksOn, outlines=parent.outlinesOn)
        if 'zdraw' in dat:
            parent.zdraw = dat['zdraw']
        else:
            parent.zdraw = [None for n in range(parent.ncells)]
        parent.loaded = True
        print('%d masks found'%(parent.ncells))
    else:
        parent.clear_all()
    parent.ismanual = np.zeros(parent.ncells, bool)
    if 'ismanual' in dat:
        if len(dat['ismanual']) == parent.ncells:
            parent.ismanual = dat['ismanual']
    if 'current_channel' in dat:
        parent.color = (dat['current_channel']+2)%5
        parent.RGBDropDown.setCurrentIndex(parent.color)
    if 'flows' in dat:
        parent.flows = dat['flows']
        # resize stored flows if they do not match the mask resolution
        if parent.flows[0].shape[-3]!=dat['masks'].shape[-2]:
            Ly, Lx = dat['masks'].shape[-2:]
            parent.flows[0] = cv2.resize(parent.flows[0][0], (Lx, Ly), interpolation=cv2.INTER_NEAREST)[np.newaxis,...]
            parent.flows[1] = cv2.resize(parent.flows[1][0], (Lx, Ly), interpolation=cv2.INTER_NEAREST)[np.newaxis,...]
        try:
            # sliders only make sense for single-plane (2D) data
            if parent.NZ==1:
                parent.threshslider.setEnabled(True)
                parent.probslider.setEnabled(True)
            else:
                parent.threshslider.setEnabled(False)
                parent.probslider.setEnabled(False)
        except:
            # fall back to a safe flows structure when stored flows are
            # malformed or from an older save format
            try:
                if len(parent.flows[0])>0:
                    parent.flows = parent.flows[0]
            except:
                parent.flows = [[],[],[],[],[[]]]
            parent.threshslider.setEnabled(False)
            parent.probslider.setEnabled(False)
    parent.enable_buttons()
    del dat
    gc.collect()
def _load_masks(parent, filename=None):
    """ load zeros-based masks (0=no cell, 1=cell 1, ...) """
    # Reads a PNG/TIFF label image, splitting off optional outline and flow
    # channels, then hands the (Z, Ly, Lx) masks to _masks_to_gui.
    if filename is None:
        name = QFileDialog.getOpenFileName(
            parent, "Load masks (PNG or TIFF)"
            )
        filename = name[0]
    masks = imread(filename)
    outlines = None
    if masks.ndim>3:
        # Z x nchannels x Ly x Lx
        if masks.shape[-1]>5:
            # channel 0 = masks, channel 1 = outlines, rest = flows
            parent.flows = list(np.transpose(masks[:,:,:,2:], (3,0,1,2)))
            outlines = masks[...,1]
            masks = masks[...,0]
        else:
            # channel 0 = masks, rest = flows (no outlines stored)
            parent.flows = list(np.transpose(masks[:,:,:,1:], (3,0,1,2)))
            masks = masks[...,0]
    elif masks.ndim==3:
        if masks.shape[-1]<5:
            # 2D multichannel image: keep the first channel as the labels
            masks = masks[np.newaxis,:,:,0]
    elif masks.ndim<3:
        masks = masks[np.newaxis,:,:]
    # masks should be Z x Ly x Lx
    if masks.shape[0]!=parent.NZ:
        print('ERROR: masks are not same depth (number of planes) as image stack')
        return
    # subtract 1 for the background label 0
    print('%d masks found'%(len(np.unique(masks))-1))
    _masks_to_gui(parent, masks, outlines)
    parent.update_plot()
def _masks_to_gui(parent, masks, outlines=None):
    """ masks loaded into GUI """
    # Relabels masks consecutively (or N-color when NCOLOR is set), derives
    # outlines if none were provided, assigns display colors, and redraws.
    # get unique values
    shape = masks.shape
    if NCOLOR:
        masks = omnipose.utils.ncolorlabel(masks)
    else:
        # relabel to consecutive integers starting at 0 (background)
        _, masks = np.unique(masks, return_inverse=True)
        masks = np.reshape(masks, shape)
    # use the smallest integer dtype that can hold all labels
    masks = masks.astype(np.uint16) if masks.max()<2**16-1 else masks.astype(np.uint32)
    parent.cellpix = masks
    # get outlines
    if outlines is None: # parent.outlinesOn
        parent.outpix = np.zeros_like(masks)
        for z in range(parent.NZ):
            outlines = utils.masks_to_outlines(masks[z])
            # keep the label value on each outline pixel
            parent.outpix[z] = outlines * masks[z]
            if z%50==0:
                print('plane %d outlines processed'%z)
    else:
        parent.outpix = outlines
        shape = parent.outpix.shape
        _,parent.outpix = np.unique(parent.outpix, return_inverse=True)
        parent.outpix = np.reshape(parent.outpix, shape)
    parent.ncells = parent.cellpix.max()
    np.random.seed(42) #try to make a bit more stable
    if NCOLOR:
        colors = parent.colormap[np.linspace(0,255,parent.ncells).astype(int), :3]
    else:
        colors = parent.colormap[np.random.randint(0,1000,size=parent.ncells), :3]
    # color 0 (white) is reserved for the background
    parent.cellcolors = list(np.concatenate((np.array([[255,255,255]]), colors), axis=0).astype(np.uint8))
    parent.draw_masks()
    parent.redraw_masks(masks=parent.masksOn, outlines=parent.outlinesOn) # add to obey outline/mask setting upon recomputing
    if parent.ncells>0:
        parent.toggle_mask_ops()
    # loaded masks are not flagged as manually drawn
    parent.ismanual = np.zeros(parent.ncells, bool)
    parent.zdraw = list(-1*np.ones(parent.ncells, np.int16))
    parent.update_plot()
def _save_png(parent):
    """ save masks to png or tiff (if 3D) """
    root, _ = os.path.splitext(parent.filename)
    if parent.NZ == 1:
        # single plane -> 2D PNG of the one-and-only slice
        print('saving 2D masks to png')
        imsave(root + '_cp_masks.png', parent.cellpix[0])
    else:
        # volume -> multi-plane TIFF of the whole label stack
        print('saving 3D masks to tiff')
        imsave(root + '_cp_masks.tif', parent.cellpix)
def _save_outlines(parent):
    """ save outlines of the 2D masks to an ImageJ-loadable text file """
    root, _ = os.path.splitext(parent.filename)
    if parent.NZ != 1:
        # outline export is only defined for single-plane data
        print('ERROR: cannot save 3D outlines')
        return
    print('saving 2D outlines to text file, see docs for info to load into ImageJ')
    outlines = utils.outlines_list(parent.cellpix[0])
    outlines_to_text(root, outlines)
def _save_sets(parent):
    """ save masks to *_seg.npy """
    # Serializes the current segmentation state (outlines, colors, masks,
    # flows, GUI settings) next to the image as <image>_seg.npy.
    filename = parent.filename
    base = os.path.splitext(filename)[0]
    if parent.NZ > 1 and parent.is_stack:
        # 3D stack: save full volumes; no per-channel image copy
        np.save(base + '_seg.npy',
                {'outlines': parent.outpix,
                 'colors': parent.cellcolors[1:],
                 'masks': parent.cellpix,
                 'current_channel': (parent.color-2)%5,
                 'filename': parent.filename,
                 'flows': parent.flows,
                 'zdraw': parent.zdraw})
    else:
        # 2D: also store the channel-selected image so the seg file is
        # self-contained
        image = parent.chanchoose(parent.stack[parent.currentZ].copy())
        if image.ndim < 4:
            image = image[np.newaxis,...]
        np.save(base + '_seg.npy',
                {'outlines': parent.outpix.squeeze(),
                 'colors': parent.cellcolors[1:],
                 'masks': parent.cellpix.squeeze(),
                 'chan_choose': [parent.ChannelChoose[0].currentIndex(),
                                 parent.ChannelChoose[1].currentIndex()],
                 'img': image.squeeze(),
                 'ismanual': parent.ismanual,
                 'X2': parent.X2,
                 'filename': parent.filename,
                 'flows': parent.flows})
    #print(parent.point_sets)
    print('--- %d ROIs saved chan1 %s, chan2 %s'%(parent.ncells,
                                                  parent.ChannelChoose[0].currentText(),
                                                  parent.ChannelChoose[1].currentText()))
"numpy.array",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName",
"numpy.save",
"numpy.reshape",
"os.path.split",
"numpy.linspace",
"numpy.random.seed",
"numpy.tile",
"numpy.ones",
"omnipose.utils.ncolorlabel",
"numpy.floor",
"os.path.splitext",
"os.path.isfile",
"gc.collect",
"cv2.resize",
... | [((917, 944), 'os.path.isfile', 'os.path.isfile', (['manual_file'], {}), '(manual_file)\n', (931, 944), False, 'import os, datetime, gc, warnings, glob\n'), ((3607, 3619), 'gc.collect', 'gc.collect', ([], {}), '()\n', (3617, 3619), False, 'import os, datetime, gc, warnings, glob\n'), ((4187, 4209), 'numpy.array', 'np.array', (['parent.stack'], {}), '(parent.stack)\n', (4195, 4209), True, 'import numpy as np\n'), ((8754, 8783), 'numpy.zeros', 'np.zeros', (['parent.ncells', 'bool'], {}), '(parent.ncells, bool)\n', (8762, 8783), True, 'import numpy as np\n'), ((10078, 10090), 'gc.collect', 'gc.collect', ([], {}), '()\n', (10088, 10090), False, 'import os, datetime, gc, warnings, glob\n'), ((12214, 12232), 'numpy.random.seed', 'np.random.seed', (['(42)'], {}), '(42)\n', (12228, 12232), True, 'import numpy as np\n'), ((12799, 12828), 'numpy.zeros', 'np.zeros', (['parent.ncells', 'bool'], {}), '(parent.ncells, bool)\n', (12807, 12828), True, 'import numpy as np\n'), ((748, 797), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['parent', '"""Load image"""'], {}), "(parent, 'Load image')\n", (775, 797), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((2623, 2656), 'numpy.transpose', 'np.transpose', (['image', '(0, 2, 3, 1)'], {}), '(image, (0, 2, 3, 1))\n', (2635, 2656), True, 'import numpy as np\n'), ((4232, 4287), 'numpy.ones', 'np.ones', (['(parent.NZ, parent.Ly, parent.Lx, 4)', 'np.uint8'], {}), '((parent.NZ, parent.Ly, parent.Lx, 4), np.uint8)\n', (4239, 4287), True, 'import numpy as np\n'), ((4446, 4469), 'numpy.floor', 'np.floor', (['(parent.NZ / 2)'], {}), '(parent.NZ / 2)\n', (4454, 4469), True, 'import numpy as np\n'), ((4735, 4808), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['parent', '"""Load labelled data"""'], {'filter': '"""*.npy"""'}), "(parent, 'Load labelled data', filter='*.npy')\n", (4762, 4808), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((10234, 10297), 
'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['parent', '"""Load masks (PNG or TIFF)"""'], {}), "(parent, 'Load masks (PNG or TIFF)')\n", (10261, 10297), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((11366, 11399), 'omnipose.utils.ncolorlabel', 'omnipose.utils.ncolorlabel', (['masks'], {}), '(masks)\n', (11392, 11399), False, 'import omnipose\n'), ((11430, 11467), 'numpy.unique', 'np.unique', (['masks'], {'return_inverse': '(True)'}), '(masks, return_inverse=True)\n', (11439, 11467), True, 'import numpy as np\n'), ((11484, 11508), 'numpy.reshape', 'np.reshape', (['masks', 'shape'], {}), '(masks, shape)\n', (11494, 11508), True, 'import numpy as np\n'), ((11717, 11737), 'numpy.zeros_like', 'np.zeros_like', (['masks'], {}), '(masks)\n', (11730, 11737), True, 'import numpy as np\n'), ((12065, 12110), 'numpy.unique', 'np.unique', (['parent.outpix'], {'return_inverse': '(True)'}), '(parent.outpix, return_inverse=True)\n', (12074, 12110), True, 'import numpy as np\n'), ((12135, 12167), 'numpy.reshape', 'np.reshape', (['parent.outpix', 'shape'], {}), '(parent.outpix, shape)\n', (12145, 12167), True, 'import numpy as np\n'), ((13027, 13053), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (13043, 13053), False, 'import os, datetime, gc, warnings, glob\n'), ((13353, 13379), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (13369, 13379), False, 'import os, datetime, gc, warnings, glob\n'), ((13761, 13787), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (13777, 13787), False, 'import os, datetime, gc, warnings, glob\n'), ((13841, 14086), 'numpy.save', 'np.save', (["(base + '_seg.npy')", "{'outlines': parent.outpix, 'colors': parent.cellcolors[1:], 'masks':\n parent.cellpix, 'current_channel': (parent.color - 2) % 5, 'filename':\n parent.filename, 'flows': parent.flows, 'zdraw': parent.zdraw}"], {}), "(base + '_seg.npy', {'outlines': parent.outpix, 
'colors': parent.\n cellcolors[1:], 'masks': parent.cellpix, 'current_channel': (parent.\n color - 2) % 5, 'filename': parent.filename, 'flows': parent.flows,\n 'zdraw': parent.zdraw})\n", (13848, 14086), True, 'import numpy as np\n'), ((869, 895), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (885, 895), False, 'import os, datetime, gc, warnings, glob\n'), ((1553, 1583), 'os.path.split', 'os.path.split', (['parent.filename'], {}), '(parent.filename)\n', (1566, 1583), False, 'import os, datetime, gc, warnings, glob\n'), ((2115, 2148), 'numpy.transpose', 'np.transpose', (['image', '(1, 0, 2, 3)'], {}), '(image, (1, 0, 2, 3))\n', (2127, 2148), True, 'import numpy as np\n'), ((3879, 3920), 'numpy.tile', 'np.tile', (['img[:, :, np.newaxis]', '(1, 1, 3)'], {}), '(img[:, :, np.newaxis], (1, 1, 3))\n', (3886, 3920), True, 'import numpy as np\n'), ((5225, 5256), 'os.path.isfile', 'os.path.isfile', (['parent.filename'], {}), '(parent.filename)\n', (5239, 5256), False, 'import os, datetime, gc, warnings, glob\n'), ((12856, 12888), 'numpy.ones', 'np.ones', (['parent.ncells', 'np.int16'], {}), '(parent.ncells, np.int16)\n', (12863, 12888), True, 'import numpy as np\n'), ((2241, 2274), 'numpy.transpose', 'np.transpose', (['image', '(0, 3, 1, 2)'], {}), '(image, (0, 3, 1, 2))\n', (2253, 2274), True, 'import numpy as np\n'), ((2729, 2759), 'numpy.transpose', 'np.transpose', (['image', '(1, 2, 0)'], {}), '(image, (1, 2, 0))\n', (2741, 2759), True, 'import numpy as np\n'), ((4885, 4921), 'numpy.load', 'np.load', (['filename'], {'allow_pickle': '(True)'}), '(filename, allow_pickle=True)\n', (4892, 4921), True, 'import numpy as np\n'), ((5541, 5572), 'os.path.isfile', 'os.path.isfile', (['parent.filename'], {}), '(parent.filename)\n', (5555, 5572), False, 'import os, datetime, gc, warnings, glob\n'), ((9246, 9319), 'cv2.resize', 'cv2.resize', (['parent.flows[0][0]', '(Lx, Ly)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(parent.flows[0][0], (Lx, Ly), 
interpolation=cv2.INTER_NEAREST)\n', (9256, 9319), False, 'import cv2\n'), ((9366, 9439), 'cv2.resize', 'cv2.resize', (['parent.flows[1][0]', '(Lx, Ly)'], {'interpolation': 'cv2.INTER_NEAREST'}), '(parent.flows[1][0], (Lx, Ly), interpolation=cv2.INTER_NEAREST)\n', (9376, 9439), False, 'import cv2\n'), ((10517, 10563), 'numpy.transpose', 'np.transpose', (['masks[:, :, :, 2:]', '(3, 0, 1, 2)'], {}), '(masks[:, :, :, 2:], (3, 0, 1, 2))\n', (10529, 10563), True, 'import numpy as np\n'), ((10674, 10720), 'numpy.transpose', 'np.transpose', (['masks[:, :, :, 1:]', '(3, 0, 1, 2)'], {}), '(masks[:, :, :, 1:], (3, 0, 1, 2))\n', (10686, 10720), True, 'import numpy as np\n'), ((12411, 12457), 'numpy.random.randint', 'np.random.randint', (['(0)', '(1000)'], {'size': 'parent.ncells'}), '(0, 1000, size=parent.ncells)\n', (12428, 12457), True, 'import numpy as np\n'), ((1096, 1122), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1112, 1122), False, 'import os, datetime, gc, warnings, glob\n'), ((1164, 1190), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (1180, 1190), False, 'import os, datetime, gc, warnings, glob\n'), ((2463, 2533), 'numpy.zeros', 'np.zeros', (['(shape[0], 3 - shape[1], shape[2], shape[3])'], {'dtype': 'np.uint8'}), '((shape[0], 3 - shape[1], shape[2], shape[3]), dtype=np.uint8)\n', (2471, 2533), True, 'import numpy as np\n'), ((5387, 5417), 'os.path.split', 'os.path.split', (['parent.filename'], {}), '(parent.filename)\n', (5400, 5417), False, 'import os, datetime, gc, warnings, glob\n'), ((5444, 5467), 'os.path.split', 'os.path.split', (['filename'], {}), '(filename)\n', (5457, 5467), False, 'import os, datetime, gc, warnings, glob\n'), ((7100, 7123), 'numpy.random.randint', 'np.random.randint', (['(1000)'], {}), '(1000)\n', (7117, 7123), True, 'import numpy as np\n'), ((11106, 11122), 'numpy.unique', 'np.unique', (['masks'], {}), '(masks)\n', (11115, 11122), True, 'import numpy as np\n'), ((12318, 
12352), 'numpy.linspace', 'np.linspace', (['(0)', '(255)', 'parent.ncells'], {}), '(0, 255, parent.ncells)\n', (12329, 12352), True, 'import numpy as np\n'), ((12507, 12534), 'numpy.array', 'np.array', (['[[255, 255, 255]]'], {}), '([[255, 255, 255]])\n', (12515, 12534), True, 'import numpy as np\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import mmcv
import numpy as np
import torch
from mmdet.core import bbox2result
def imrenormalize(img, img_norm_cfg, new_img_norm_cfg):
    """Re-normalize the image.

    Args:
        img (Tensor | ndarray): Input image. A Tensor must have shape
            (1, C, H, W); an ndarray must have shape (H, W, C).
        img_norm_cfg (dict): Original configuration for the normalization.
        new_img_norm_cfg (dict): New configuration for the normalization.

    Returns:
        Tensor | ndarray: Output image with the same type and shape as
        the input.
    """
    # ndarrays are already in the HWC layout _imrenormalize expects.
    if not isinstance(img, torch.Tensor):
        return _imrenormalize(img, img_norm_cfg, new_img_norm_cfg)
    # Tensor path: drop the batch dim, go CHW -> HWC on the CPU,
    # renormalize as an ndarray, then restore layout/device/dtype.
    assert img.ndim == 4 and img.shape[0] == 1
    hwc = img.squeeze(0).cpu().numpy().transpose(1, 2, 0)
    hwc = _imrenormalize(hwc, img_norm_cfg, new_img_norm_cfg)
    chw = hwc.transpose(2, 0, 1)[None]
    return torch.from_numpy(chw).to(img)
def _imrenormalize(img, img_norm_cfg, new_img_norm_cfg):
    """Re-normalize an HWC ndarray: undo the old normalization, apply the new."""
    old_cfg = img_norm_cfg.copy()
    new_cfg = new_img_norm_cfg.copy()
    # mmcv expects mean/std as ndarrays matching the image dtype.
    for cfg in (old_cfg, new_cfg):
        for key in ('mean', 'std'):
            if key in cfg and not isinstance(cfg[key], np.ndarray):
                cfg[key] = np.array(cfg[key], dtype=img.dtype)
    # imdenormalize reverses the normalization, so 'to_rgb' flips to 'to_bgr'.
    if 'to_rgb' in old_cfg:
        old_cfg['to_bgr'] = old_cfg.pop('to_rgb')
    img = mmcv.imdenormalize(img, **old_cfg)
    return mmcv.imnormalize(img, **new_cfg)
return img
def outs2results(bboxes=None,
                 labels=None,
                 masks=None,
                 ids=None,
                 num_classes=None,
                 **kwargs):
    """Convert tracking/detection results to lists of numpy arrays.

    Args:
        bboxes (torch.Tensor | np.ndarray): shape (n, 5)
        labels (torch.Tensor | np.ndarray): shape (n, )
        masks (torch.Tensor | np.ndarray): shape (n, h, w)
        ids (torch.Tensor | np.ndarray): shape (n, ); when given, entries
            with id == -1 are dropped and ids are prepended to each bbox.
        num_classes (int): class number, not including background class

    Returns:
        dict[str, list]: may contain 'bbox_results' (per-class (k, 6) or
        (k, 5) arrays) and 'mask_results' (per-class lists of (h, w) masks).
    """
    assert labels is not None
    assert num_classes is not None

    results = dict()
    tracking = ids is not None
    if tracking:
        # drop entries without a valid track id
        keep = ids > -1
        ids = ids[keep]
        labels = labels[keep]

    if bboxes is not None:
        if tracking:
            bboxes = bboxes[keep]
            if bboxes.shape[0] == 0:
                bbox_results = [np.zeros((0, 6), dtype=np.float32)
                                for _ in range(num_classes)]
            else:
                if isinstance(bboxes, torch.Tensor):
                    bboxes = bboxes.cpu().numpy()
                    labels = labels.cpu().numpy()
                    ids = ids.cpu().numpy()
                bbox_results = []
                for cls in range(num_classes):
                    sel = labels == cls
                    # column 0 carries the instance id
                    bbox_results.append(
                        np.concatenate((ids[sel, None], bboxes[sel, :]), axis=1))
        else:
            # plain detection output, no id column
            bbox_results = bbox2result(bboxes, labels, num_classes)
        results['bbox_results'] = bbox_results

    if masks is not None:
        if tracking:
            masks = masks[keep]
        if isinstance(masks, torch.Tensor):
            masks = masks.detach().cpu().numpy()
        masks_results = [[] for _ in range(num_classes)]
        for idx in range(bboxes.shape[0]):
            masks_results[labels[idx]].append(masks[idx])
        results['mask_results'] = masks_results

    return results
def results2outs(bbox_results=None,
                 mask_results=None,
                 mask_shape=None,
                 **kwargs):
    """Restore the results (list of results of each category) into the results
    of the model forward.

    Args:
        bbox_results (list[np.ndarray]): Each list denotes bboxes of one
            category.
        mask_results (list[list[np.ndarray]]): Each outer list denotes masks of
            one category. Each inner list denotes one mask belonging to
            the category. Each mask has shape (h, w).
        mask_shape (tuple[int]): The shape (h, w) of mask.

    Returns:
        tuple: tracking results of each class. It may contain keys as belows:

        - bboxes (np.ndarray): shape (n, 5)
        - labels (np.ndarray): shape (n, )
        - masks (np.ndarray): shape (n, h, w)
        - ids (np.ndarray): shape (n, )
    """
    outputs = dict()
    if bbox_results is not None:
        labels = []
        # The class index is implicit in the list position; make it explicit.
        for i, bbox in enumerate(bbox_results):
            labels.extend([i] * bbox.shape[0])
        labels = np.array(labels, dtype=np.int64)
        outputs['labels'] = labels
        bboxes = np.concatenate(bbox_results, axis=0).astype(np.float32)
        if bboxes.shape[1] == 5:
            outputs['bboxes'] = bboxes
        elif bboxes.shape[1] == 6:
            # Column 0 carries the track id (see outs2results); split it off.
            ids = bboxes[:, 0].astype(np.int64)
            bboxes = bboxes[:, 1:]
            outputs['bboxes'] = bboxes
            outputs['ids'] = ids
        else:
            raise NotImplementedError(
                f'Not supported bbox shape: (N, {bboxes.shape[1]})')
    if mask_results is not None:
        assert mask_shape is not None
        mask_height, mask_width = mask_shape
        # Flatten the per-class nesting back into one list of (h, w) masks.
        mask_results = mmcv.concat_list(mask_results)
        if len(mask_results) == 0:
            masks = np.zeros((0, mask_height, mask_width)).astype(bool)
        else:
            masks = np.stack(mask_results, axis=0)
        outputs['masks'] = masks
    return outputs
| [
"torch.from_numpy",
"mmdet.core.bbox2result",
"numpy.array",
"mmcv.imdenormalize",
"numpy.stack",
"mmcv.concat_list",
"numpy.zeros",
"numpy.concatenate",
"mmcv.imnormalize"
] | [((1748, 1787), 'mmcv.imdenormalize', 'mmcv.imdenormalize', (['img'], {}), '(img, **img_norm_cfg)\n', (1766, 1787), False, 'import mmcv\n'), ((1798, 1839), 'mmcv.imnormalize', 'mmcv.imnormalize', (['img'], {}), '(img, **new_img_norm_cfg)\n', (1814, 1839), False, 'import mmcv\n'), ((5421, 5453), 'numpy.array', 'np.array', (['labels'], {'dtype': 'np.int64'}), '(labels, dtype=np.int64)\n', (5429, 5453), True, 'import numpy as np\n'), ((6087, 6117), 'mmcv.concat_list', 'mmcv.concat_list', (['mask_results'], {}), '(mask_results)\n', (6103, 6117), False, 'import mmcv\n'), ((1388, 1416), 'numpy.array', 'np.array', (['v'], {'dtype': 'img.dtype'}), '(v, dtype=img.dtype)\n', (1396, 1416), True, 'import numpy as np\n'), ((1709, 1737), 'numpy.array', 'np.array', (['v'], {'dtype': 'img.dtype'}), '(v, dtype=img.dtype)\n', (1717, 1737), True, 'import numpy as np\n'), ((3854, 3894), 'mmdet.core.bbox2result', 'bbox2result', (['bboxes', 'labels', 'num_classes'], {}), '(bboxes, labels, num_classes)\n', (3865, 3894), False, 'from mmdet.core import bbox2result\n'), ((6259, 6289), 'numpy.stack', 'np.stack', (['mask_results'], {'axis': '(0)'}), '(mask_results, axis=0)\n', (6267, 6289), True, 'import numpy as np\n'), ((956, 981), 'torch.from_numpy', 'torch.from_numpy', (['new_img'], {}), '(new_img)\n', (972, 981), False, 'import torch\n'), ((5507, 5543), 'numpy.concatenate', 'np.concatenate', (['bbox_results'], {'axis': '(0)'}), '(bbox_results, axis=0)\n', (5521, 5543), True, 'import numpy as np\n'), ((3276, 3310), 'numpy.zeros', 'np.zeros', (['(0, 6)'], {'dtype': 'np.float32'}), '((0, 6), dtype=np.float32)\n', (3284, 3310), True, 'import numpy as np\n'), ((3645, 3717), 'numpy.concatenate', 'np.concatenate', (['(ids[labels == i, None], bboxes[labels == i, :])'], {'axis': '(1)'}), '((ids[labels == i, None], bboxes[labels == i, :]), axis=1)\n', (3659, 3717), True, 'import numpy as np\n'), ((6173, 6211), 'numpy.zeros', 'np.zeros', (['(0, mask_height, mask_width)'], {}), '((0, 
mask_height, mask_width))\n', (6181, 6211), True, 'import numpy as np\n')] |
import random
import numpy as np
import skimage.io as sio
import skimage.color as sc
import skimage.transform as st
import torch
from torchvision import transforms
def get_patch(haze_tensor, A_tensor, t_tensor, latent_tensor, patch_size):
    """Crop the same random square window from all four aligned (C, H, W) tensors."""
    assert haze_tensor.shape[1:] == A_tensor.shape[1:]
    assert haze_tensor.shape[1:] == t_tensor.shape[1:]
    assert haze_tensor.shape[1:] == latent_tensor.shape[1:]
    height, width = haze_tensor.shape[1:]
    # Draw the horizontal offset first, then the vertical one (keeps the
    # random stream identical to the reference implementation).
    left = random.randrange(0, width - patch_size + 1)
    top = random.randrange(0, height - patch_size + 1)
    window = (slice(None), slice(top, top + patch_size), slice(left, left + patch_size))
    return (haze_tensor[window], A_tensor[window],
            t_tensor[window], latent_tensor[window])
def set_channel(l, n_channel):
    """Coerce every image in ``l`` to ``n_channel`` channels, HxWxC layout."""
    def _convert(img):
        # Promote grayscale HxW to HxWx1 so channel logic is uniform.
        if img.ndim == 2:
            img = np.expand_dims(img, axis=2)
        channels = img.shape[2]
        if n_channel == 1 and channels == 3:
            # RGB -> luma (Y channel of YCbCr).
            img = np.expand_dims(sc.rgb2ycbcr(img)[:, :, 0], 2)
        elif n_channel == 3 and channels == 1:
            # Replicate the single channel three times.
            img = np.concatenate([img] * n_channel, 2)
        return img
    return [_convert(image) for image in l]
def np2Tensor(l, rgb_range):
    """Convert HWC (or HW) numpy images to float torch tensors scaled by rgb_range / 255."""
    def _convert(img):
        if img.ndim == 3:
            # HWC -> CHW; ascontiguousarray avoids negative-stride issues.
            chw = np.ascontiguousarray(img.transpose((2, 0, 1)))
            tensor = torch.from_numpy(chw).float()
            tensor.mul_(rgb_range / 255)
        elif img.ndim == 2:
            tensor = torch.from_numpy(np.ascontiguousarray(img)).float()
            tensor.mul_(rgb_range / 255)
        else:
            # Unsupported rank: mirrors the reference behaviour (falls
            # through and fails on the return below).
            pass
        return tensor
    return [_convert(image) for image in l]
def augment(l, hflip=True, rot=True):
    """Apply one random flip/rotate decision to every HxWxC image in ``l``."""
    # Decide once per call so all images get the same augmentation.
    do_hflip = hflip and random.random() < 0.5
    do_vflip = rot and random.random() < 0.5
    do_rot90 = rot and random.random() < 0.5
    def _apply(img):
        if do_hflip:
            img = img[:, ::-1, :]
        if do_vflip:
            img = img[::-1, :, :]
        if do_rot90:
            img = img.transpose(1, 0, 2)
        return img
    return [_apply(image) for image in l]
| [
"random.randrange",
"skimage.color.rgb2ycbcr",
"torch.from_numpy",
"numpy.ascontiguousarray",
"numpy.concatenate",
"numpy.expand_dims",
"random.random"
] | [((458, 498), 'random.randrange', 'random.randrange', (['(0)', '(iw - patch_size + 1)'], {}), '(0, iw - patch_size + 1)\n', (474, 498), False, 'import random\n'), ((508, 548), 'random.randrange', 'random.randrange', (['(0)', '(ih - patch_size + 1)'], {}), '(0, ih - patch_size + 1)\n', (524, 548), False, 'import random\n'), ((1000, 1027), 'numpy.expand_dims', 'np.expand_dims', (['img'], {'axis': '(2)'}), '(img, axis=2)\n', (1014, 1027), True, 'import numpy as np\n'), ((1867, 1882), 'random.random', 'random.random', ([], {}), '()\n', (1880, 1882), False, 'import random\n'), ((1909, 1924), 'random.random', 'random.random', ([], {}), '()\n', (1922, 1924), False, 'import random\n'), ((1951, 1966), 'random.random', 'random.random', ([], {}), '()\n', (1964, 1966), False, 'import random\n'), ((1214, 1250), 'numpy.concatenate', 'np.concatenate', (['([img] * n_channel)', '(2)'], {}), '([img] * n_channel, 2)\n', (1228, 1250), True, 'import numpy as np\n'), ((1125, 1142), 'skimage.color.rgb2ycbcr', 'sc.rgb2ycbcr', (['img'], {}), '(img)\n', (1137, 1142), True, 'import skimage.color as sc\n'), ((1490, 1520), 'torch.from_numpy', 'torch.from_numpy', (['np_transpose'], {}), '(np_transpose)\n', (1506, 1520), False, 'import torch\n'), ((1636, 1661), 'numpy.ascontiguousarray', 'np.ascontiguousarray', (['img'], {}), '(img)\n', (1656, 1661), True, 'import numpy as np\n')] |
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A simple functional keras model with one layer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python import keras
from tensorflow.python.distribute.model_collection import model_collection_base
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.python.module import module
from tensorflow.python.ops import variables
_BATCH_SIZE = 10
def _get_data_for_simple_models():
  # Shared random data for the simple models below: 1000 samples of
  # 3 features, with 5 regression targets.
  x_train = constant_op.constant(np.random.rand(1000, 3), dtype=dtypes.float32)
  y_train = constant_op.constant(np.random.rand(1000, 5), dtype=dtypes.float32)
  x_predict = constant_op.constant(
      np.random.rand(1000, 3), dtype=dtypes.float32)

  return x_train, y_train, x_predict
class SimpleFunctionalModel(model_collection_base.ModelAndInput):
  """A simple functional model and its inputs."""

  def get_model(self, **kwargs):
    """Builds and compiles a one-dense-layer functional Keras model.

    Returns:
      Tuple of (compiled model, name of the output layer).
    """
    output_name = 'output_layer'

    x = keras.layers.Input(shape=(3,), dtype=dtypes.float32)
    y = keras.layers.Dense(5, dtype=dtypes.float32, name=output_name)(x)
    model = keras.Model(inputs=x, outputs=y)
    optimizer = gradient_descent.SGD(learning_rate=0.001)
    # Callers must decide explicitly whether compile uses tf.function.
    experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',
                                              None)
    assert experimental_run_tf_function is not None
    model.compile(
        loss='mse',
        metrics=['mae'],
        optimizer=optimizer,
        experimental_run_tf_function=experimental_run_tf_function)

    return model, output_name

  def get_data(self):
    return _get_data_for_simple_models()

  def get_batch_size(self):
    return _BATCH_SIZE
class SimpleSequentialModel(model_collection_base.ModelAndInput):
  """A simple sequential model and its inputs."""

  def get_model(self, **kwargs):
    """Builds and compiles a one-dense-layer sequential Keras model.

    Returns:
      Tuple of (compiled model, name of the output layer).
    """
    output_name = 'output_layer'

    model = keras.Sequential()
    y = keras.layers.Dense(
        5, dtype=dtypes.float32, name=output_name, input_dim=3)
    model.add(y)
    optimizer = gradient_descent.SGD(learning_rate=0.001)
    # Callers must decide explicitly whether compile uses tf.function.
    experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',
                                              None)
    assert experimental_run_tf_function is not None
    model.compile(
        loss='mse',
        metrics=['mae'],
        optimizer=optimizer,
        experimental_run_tf_function=experimental_run_tf_function)

    return model, output_name

  def get_data(self):
    return _get_data_for_simple_models()

  def get_batch_size(self):
    return _BATCH_SIZE
class _SimpleModel(keras.Model):
  """A subclassed Keras model holding a single dense layer."""

  output_name = 'output_layer'

  def __init__(self):
    # Bug fix: keras.Model subclasses must initialize the base class before
    # creating layers, otherwise layer tracking/checkpointing is broken.
    super(_SimpleModel, self).__init__()
    self._dense_layer = keras.layers.Dense(
        5, dtype=dtypes.float32, name=self.output_name)

  def call(self, inputs):
    """Applies the dense layer to ``inputs``."""
    return self._dense_layer(inputs)
class SimpleSubclassModel(model_collection_base.ModelAndInput):
  """A simple subclass model and its data."""

  def get_model(self, **kwargs):
    """Builds and compiles the subclassed model.

    Returns:
      Tuple of (compiled model, name of the output layer).
    """
    model = _SimpleModel()
    optimizer = gradient_descent.SGD(learning_rate=0.001)
    experimental_run_tf_function = kwargs.pop('experimental_run_tf_function',
                                              None)
    assert experimental_run_tf_function is not None
    # NOTE(review): unlike the other models in this file, this compile call
    # also passes cloning=False -- confirm the inconsistency is intentional.
    model.compile(
        loss='mse',
        metrics=['mae'],
        cloning=False,
        optimizer=optimizer,
        experimental_run_tf_function=experimental_run_tf_function)

    return model, model.output_name

  def get_data(self):
    return _get_data_for_simple_models()

  def get_batch_size(self):
    return _BATCH_SIZE
class _SimpleModule(module.Module):
  """A tf.Module with one variable; calling it multiplies the input by it."""

  def __init__(self):
    self.v = variables.Variable(3.0)

  @def_function.function
  def __call__(self, x):
    return self.v * x
class SimpleTFModuleModel(model_collection_base.ModelAndInput):
  """A simple model based on tf.Module and its data."""

  def get_model(self, **kwargs):
    # tf.Module models are not compiled; 'foo' stands in for an output name.
    model = _SimpleModule()
    return model, 'foo'

  def get_data(self):
    return _get_data_for_simple_models()

  def get_batch_size(self):
    return _BATCH_SIZE
| [
"numpy.random.rand",
"tensorflow.python.keras.Model",
"tensorflow.python.keras.layers.Dense",
"tensorflow.python.keras.Sequential",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.keras.optimizer_v2.gradient_descent.SGD",
"tensorflow.python.keras.layers.Input"
] | [((1380, 1403), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (1394, 1403), True, 'import numpy as np\n'), ((1460, 1483), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(5)'], {}), '(1000, 5)\n', (1474, 1483), True, 'import numpy as np\n'), ((1549, 1572), 'numpy.random.rand', 'np.random.rand', (['(1000)', '(3)'], {}), '(1000, 3)\n', (1563, 1572), True, 'import numpy as np\n'), ((1827, 1879), 'tensorflow.python.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(3,)', 'dtype': 'dtypes.float32'}), '(shape=(3,), dtype=dtypes.float32)\n', (1845, 1879), False, 'from tensorflow.python import keras\n'), ((1966, 1998), 'tensorflow.python.keras.Model', 'keras.Model', ([], {'inputs': 'x', 'outputs': 'y'}), '(inputs=x, outputs=y)\n', (1977, 1998), False, 'from tensorflow.python import keras\n'), ((2015, 2056), 'tensorflow.python.keras.optimizer_v2.gradient_descent.SGD', 'gradient_descent.SGD', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2035, 2056), False, 'from tensorflow.python.keras.optimizer_v2 import gradient_descent\n'), ((2744, 2762), 'tensorflow.python.keras.Sequential', 'keras.Sequential', ([], {}), '()\n', (2760, 2762), False, 'from tensorflow.python import keras\n'), ((2771, 2845), 'tensorflow.python.keras.layers.Dense', 'keras.layers.Dense', (['(5)'], {'dtype': 'dtypes.float32', 'name': 'output_name', 'input_dim': '(3)'}), '(5, dtype=dtypes.float32, name=output_name, input_dim=3)\n', (2789, 2845), False, 'from tensorflow.python import keras\n'), ((2888, 2929), 'tensorflow.python.keras.optimizer_v2.gradient_descent.SGD', 'gradient_descent.SGD', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (2908, 2929), False, 'from tensorflow.python.keras.optimizer_v2 import gradient_descent\n'), ((3533, 3599), 'tensorflow.python.keras.layers.Dense', 'keras.layers.Dense', (['(5)'], {'dtype': 'dtypes.float32', 'name': 'self.output_name'}), '(5, dtype=dtypes.float32, name=self.output_name)\n', 
(3551, 3599), False, 'from tensorflow.python import keras\n'), ((3862, 3903), 'tensorflow.python.keras.optimizer_v2.gradient_descent.SGD', 'gradient_descent.SGD', ([], {'learning_rate': '(0.001)'}), '(learning_rate=0.001)\n', (3882, 3903), False, 'from tensorflow.python.keras.optimizer_v2 import gradient_descent\n'), ((4496, 4519), 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), '(3.0)\n', (4514, 4519), False, 'from tensorflow.python.ops import variables\n'), ((1888, 1949), 'tensorflow.python.keras.layers.Dense', 'keras.layers.Dense', (['(5)'], {'dtype': 'dtypes.float32', 'name': 'output_name'}), '(5, dtype=dtypes.float32, name=output_name)\n', (1906, 1949), False, 'from tensorflow.python import keras\n')] |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
"""Other coordinate and distance-related functions"""
import numpy as np
from astropy.units import Quantity, Unit
__all__ = [
"cartesian",
"galactic",
"velocity_glon_glat",
"motion_since_birth",
"polar",
"D_SUN_TO_GALACTIC_CENTER",
]
# TODO: replace this with the default from the Galactocentric frame in astropy.coordinates
D_SUN_TO_GALACTIC_CENTER = Quantity(8.5, "kpc")
"""Default assumed distance from the Sun to the Galactic center (`~astropy.units.Quantity`)"""
def cartesian(r, theta):
    """Convert polar coordinates (r, theta) to cartesian (x, y)."""
    return r * np.cos(theta), r * np.sin(theta)
def polar(x, y):
    """Convert cartesian coordinates (x, y) to polar (r, theta)."""
    radius = np.sqrt(x ** 2 + y ** 2)
    angle = np.arctan2(y, x)
    return radius, angle
def galactic(x, y, z, obs_pos=None):
    """Compute galactic coordinates lon, lat and distance.

    For given position in cartesian coordinates (kpc).

    NOTE(review): ``obs_pos`` is resolved to a default below but then never
    used -- the Sun-to-GC offset is hard-coded via D_SUN_TO_GALACTIC_CENTER.
    Confirm whether ``obs_pos`` was meant to replace that constant.
    """
    obs_pos = obs_pos or [D_SUN_TO_GALACTIC_CENTER, 0, 0]
    # Shift the y axis by the Sun-to-Galactic-center distance.
    y_prime = y + D_SUN_TO_GALACTIC_CENTER
    d = np.sqrt(x ** 2 + y_prime ** 2 + z ** 2)
    glon = np.arctan2(x, y_prime).to("deg")
    glat = np.arcsin(z / d).to("deg")
    return d, glon, glat
def velocity_glon_glat(x, y, z, vx, vy, vz):
    """
    Compute projected angular velocity in galactic coordinates.

    Parameters
    ----------
    x, y, z : `~astropy.units.Quantity`
        Position in x, y, z direction
    vx, vy, vz : `~astropy.units.Quantity`
        Velocity in x, y, z direction

    Returns
    -------
    v_glon, v_glat : `~astropy.units.Quantity`
        Projected velocity in Galactic sky coordinates
    """
    # Shift the y axis by the Sun-to-Galactic-center distance.
    y_prime = y + D_SUN_TO_GALACTIC_CENTER
    d = np.sqrt(x ** 2 + y_prime ** 2 + z ** 2)
    # Projected (in-plane) distance.
    r = np.sqrt(x ** 2 + y_prime ** 2)
    # Time derivative of glon = arctan2(x, y_prime).
    v_glon = (-y_prime * vx + x * vy) / r ** 2
    # Time derivative of glat = arcsin(z / d).
    v_glat = vz / (np.sqrt(1 - (z / d) ** 2) * d) - np.sqrt(
        vx ** 2 + vy ** 2 + vz ** 2
    ) * z / (np.sqrt(1 - (z / d) ** 2) * d ** 2)
    return v_glon * Unit("rad"), v_glat * Unit("rad")
def motion_since_birth(v, age, theta, phi):
    """
    Compute motion of a object with given velocity, direction and age.

    Parameters
    ----------
    v : `~astropy.units.Quantity`
        Absolute value of the velocity
    age : `~astropy.units.Quantity`
        Age of the source.
    theta, phi : `~astropy.units.Quantity`
        Angular direction of the velocity.

    Returns
    -------
    dx, dy, dz : `~astropy.units.Quantity`
        Displacement in x, y, z direction
    vx, vy, vz : `~astropy.units.Quantity`
        Velocity in x, y, z direction
    """
    # Spherical -> cartesian velocity components.
    sin_theta = np.sin(theta)
    vx = v * np.cos(phi) * sin_theta
    vy = v * np.sin(phi) * sin_theta
    vz = v * np.cos(theta)

    # Straight-line displacement accumulated over the source's age.
    dx, dy, dz = vx * age, vy * age, vz * age

    return dx, dy, dz, vx, vy, vz
| [
"numpy.sqrt",
"astropy.units.Unit",
"numpy.arcsin",
"numpy.arctan2",
"numpy.cos",
"numpy.sin",
"astropy.units.Quantity"
] | [((442, 462), 'astropy.units.Quantity', 'Quantity', (['(8.5)', '"""kpc"""'], {}), "(8.5, 'kpc')\n", (450, 462), False, 'from astropy.units import Quantity, Unit\n'), ((804, 828), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y ** 2)'], {}), '(x ** 2 + y ** 2)\n', (811, 828), True, 'import numpy as np\n'), ((841, 857), 'numpy.arctan2', 'np.arctan2', (['y', 'x'], {}), '(y, x)\n', (851, 857), True, 'import numpy as np\n'), ((1149, 1188), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y_prime ** 2 + z ** 2)'], {}), '(x ** 2 + y_prime ** 2 + z ** 2)\n', (1156, 1188), True, 'import numpy as np\n'), ((1791, 1830), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y_prime ** 2 + z ** 2)'], {}), '(x ** 2 + y_prime ** 2 + z ** 2)\n', (1798, 1830), True, 'import numpy as np\n'), ((1839, 1869), 'numpy.sqrt', 'np.sqrt', (['(x ** 2 + y_prime ** 2)'], {}), '(x ** 2 + y_prime ** 2)\n', (1846, 1869), True, 'import numpy as np\n'), ((659, 672), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (665, 672), True, 'import numpy as np\n'), ((685, 698), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (691, 698), True, 'import numpy as np\n'), ((2722, 2735), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2728, 2735), True, 'import numpy as np\n'), ((2763, 2776), 'numpy.sin', 'np.sin', (['theta'], {}), '(theta)\n', (2769, 2776), True, 'import numpy as np\n'), ((2790, 2803), 'numpy.cos', 'np.cos', (['theta'], {}), '(theta)\n', (2796, 2803), True, 'import numpy as np\n'), ((1200, 1222), 'numpy.arctan2', 'np.arctan2', (['x', 'y_prime'], {}), '(x, y_prime)\n', (1210, 1222), True, 'import numpy as np\n'), ((1244, 1260), 'numpy.arcsin', 'np.arcsin', (['(z / d)'], {}), '(z / d)\n', (1253, 1260), True, 'import numpy as np\n'), ((2084, 2095), 'astropy.units.Unit', 'Unit', (['"""rad"""'], {}), "('rad')\n", (2088, 2095), False, 'from astropy.units import Quantity, Unit\n'), ((2106, 2117), 'astropy.units.Unit', 'Unit', (['"""rad"""'], {}), "('rad')\n", (2110, 2117), False, 'from astropy.units import Quantity, 
Unit\n'), ((2708, 2719), 'numpy.cos', 'np.cos', (['phi'], {}), '(phi)\n', (2714, 2719), True, 'import numpy as np\n'), ((2749, 2760), 'numpy.sin', 'np.sin', (['phi'], {}), '(phi)\n', (2755, 2760), True, 'import numpy as np\n'), ((1937, 1962), 'numpy.sqrt', 'np.sqrt', (['(1 - (z / d) ** 2)'], {}), '(1 - (z / d) ** 2)\n', (1944, 1962), True, 'import numpy as np\n'), ((1970, 2006), 'numpy.sqrt', 'np.sqrt', (['(vx ** 2 + vy ** 2 + vz ** 2)'], {}), '(vx ** 2 + vy ** 2 + vz ** 2)\n', (1977, 2006), True, 'import numpy as np\n'), ((2028, 2053), 'numpy.sqrt', 'np.sqrt', (['(1 - (z / d) ** 2)'], {}), '(1 - (z / d) ** 2)\n', (2035, 2053), True, 'import numpy as np\n')] |
"""Utility functions for conversion between color models."""
__all__ = [
"color_to_rgb",
"color_to_rgba",
"rgb_to_color",
"rgba_to_color",
"rgb_to_hex",
"hex_to_rgb",
"invert_color",
"color_to_int_rgb",
"color_to_int_rgba",
"color_gradient",
"interpolate_color",
"average_color",
"random_bright_color",
"random_color",
"get_shaded_rgb",
"DARK_BLUE",
"DARK_BROWN",
"LIGHT_BROWN",
"BLUE_E",
"BLUE_D",
"BLUE_C",
"BLUE",
"BLUE_B",
"BLUE_A",
"TEAL_E",
"TEAL_D",
"TEAL_C",
"TEAL",
"TEAL_B",
"TEAL_A",
"GREEN_E",
"GREEN_D",
"GREEN_C",
"GREEN",
"GREEN_B",
"GREEN_A",
"YELLOW_E",
"YELLOW_D",
"YELLOW_C",
"YELLOW",
"YELLOW_B",
"YELLOW_A",
"GOLD_E",
"GOLD_D",
"GOLD_C",
"GOLD",
"GOLD_B",
"GOLD_A",
"RED_E",
"RED_D",
"RED_C",
"RED",
"RED_B",
"RED_A",
"MAROON_E",
"MAROON_D",
"MAROON_C",
"MAROON",
"MAROON_B",
"MAROON_A",
"PURPLE_E",
"PURPLE_D",
"PURPLE_C",
"PURPLE",
"PURPLE_B",
"PURPLE_A",
"WHITE",
"BLACK",
"LIGHT_GRAY",
"LIGHT_GREY",
"GRAY",
"GREY",
"DARK_GREY",
"DARK_GRAY",
"DARKER_GREY",
"DARKER_GRAY",
"GREY_BROWN",
"PINK",
"LIGHT_PINK",
"GREEN_SCREEN",
"ORANGE",
]
from enum import Enum
import random
from colour import Color
import numpy as np
from ..utils.bezier import interpolate
from ..utils.simple_functions import clip_in_place
from ..utils.space_ops import normalize
class Colors(Enum):
    """A list of pre-defined colors.

    Examples
    --------
    The preferred way of using these colors is

    .. code-block:: python

        >>> import manim.utils.color as C
        >>> C.WHITE
        '#FFFFFF'

    Note this way uses the name of the colors in UPPERCASE.

    Alternatively, you can also import this Enum directly and use its members
    directly, through the use of :code:`color.value`.  Note this way uses the
    name of the colors in lowercase.

    .. code-block:: python

        >>> from manim.utils.color import Colors
        >>> Colors.white.value
        '#FFFFFF'
    """

    dark_blue = "#236B8E"
    dark_brown = "#8B4513"
    light_brown = "#CD853F"
    blue_e = "#1C758A"
    blue_d = "#29ABCA"
    blue_c = "#58C4DD"
    blue = "#58C4DD"
    blue_b = "#9CDCEB"
    blue_a = "#C7E9F1"
    teal_e = "#49A88F"
    teal_d = "#55C1A7"
    teal_c = "#5CD0B3"
    teal = "#5CD0B3"
    teal_b = "#76DDC0"
    teal_a = "#ACEAD7"
    green_e = "#699C52"
    green_d = "#77B05D"
    green_c = "#83C167"
    green = "#83C167"
    green_b = "#A6CF8C"
    green_a = "#C9E2AE"
    yellow_e = "#E8C11C"
    yellow_d = "#F4D345"
    yellow_c = "#FFFF00"
    yellow = "#FFFF00"
    yellow_b = "#FFEA94"
    yellow_a = "#FFF1B6"
    gold_e = "#C78D46"
    gold_d = "#E1A158"
    gold_c = "#F0AC5F"
    gold = "#F0AC5F"
    gold_b = "#F9B775"
    gold_a = "#F7C797"
    red_e = "#CF5044"
    red_d = "#E65A4C"
    red_c = "#FC6255"
    red = "#FC6255"
    red_b = "#FF8080"
    red_a = "#F7A1A3"
    maroon_e = "#94424F"
    maroon_d = "#A24D61"
    maroon_c = "#C55F73"
    maroon = "#C55F73"
    maroon_b = "#EC92AB"
    maroon_a = "#ECABC1"
    purple_e = "#644172"
    purple_d = "#715582"
    purple_c = "#9A72AC"
    purple = "#9A72AC"
    purple_b = "#B189C6"
    purple_a = "#CAA3E8"
    white = "#FFFFFF"
    black = "#000000"
    light_gray = "#BBBBBB"
    light_grey = "#BBBBBB"
    gray = "#888888"
    grey = "#888888"
    dark_grey = "#444444"
    dark_gray = "#444444"
    darker_grey = "#222222"
    darker_gray = "#222222"
    grey_brown = "#736357"
    pink = "#D147BD"
    light_pink = "#DC75CD"
    green_screen = "#00FF00"
    orange = "#FF862F"


# Module-level UPPERCASE aliases for the palette above.
DARK_BLUE = Colors.dark_blue.value
DARK_BROWN = Colors.dark_brown.value
# Bug fix: LIGHT_BROWN previously aliased Colors.dark_brown ("#8B4513").
LIGHT_BROWN = Colors.light_brown.value
BLUE_E = Colors.blue_e.value
BLUE_D = Colors.blue_d.value
BLUE_C = Colors.blue_c.value
BLUE = Colors.blue.value
BLUE_B = Colors.blue_b.value
BLUE_A = Colors.blue_a.value
TEAL_E = Colors.teal_e.value
TEAL_D = Colors.teal_d.value
TEAL_C = Colors.teal_c.value
TEAL = Colors.teal.value
TEAL_B = Colors.teal_b.value
TEAL_A = Colors.teal_a.value
GREEN_E = Colors.green_e.value
GREEN_D = Colors.green_d.value
GREEN_C = Colors.green_c.value
GREEN = Colors.green.value
GREEN_B = Colors.green_b.value
GREEN_A = Colors.green_a.value
YELLOW_E = Colors.yellow_e.value
YELLOW_D = Colors.yellow_d.value
YELLOW_C = Colors.yellow_c.value
YELLOW = Colors.yellow.value
YELLOW_B = Colors.yellow_b.value
YELLOW_A = Colors.yellow_a.value
GOLD_E = Colors.gold_e.value
GOLD_D = Colors.gold_d.value
GOLD_C = Colors.gold_c.value
GOLD = Colors.gold.value
GOLD_B = Colors.gold_b.value
GOLD_A = Colors.gold_a.value
RED_E = Colors.red_e.value
RED_D = Colors.red_d.value
RED_C = Colors.red_c.value
RED = Colors.red.value
RED_B = Colors.red_b.value
RED_A = Colors.red_a.value
MAROON_E = Colors.maroon_e.value
MAROON_D = Colors.maroon_d.value
MAROON_C = Colors.maroon_c.value
MAROON = Colors.maroon.value
MAROON_B = Colors.maroon_b.value
MAROON_A = Colors.maroon_a.value
PURPLE_E = Colors.purple_e.value
PURPLE_D = Colors.purple_d.value
PURPLE_C = Colors.purple_c.value
PURPLE = Colors.purple.value
PURPLE_B = Colors.purple_b.value
PURPLE_A = Colors.purple_a.value
WHITE = Colors.white.value
BLACK = Colors.black.value
LIGHT_GRAY = Colors.light_gray.value
LIGHT_GREY = Colors.light_grey.value
GRAY = Colors.gray.value
GREY = Colors.grey.value
DARK_GREY = Colors.dark_grey.value
DARK_GRAY = Colors.dark_gray.value
# Consistency fix: GREY alias now reads the grey-spelled member (same value).
DARKER_GREY = Colors.darker_grey.value
DARKER_GRAY = Colors.darker_gray.value
GREY_BROWN = Colors.grey_brown.value
PINK = Colors.pink.value
LIGHT_PINK = Colors.light_pink.value
GREEN_SCREEN = Colors.green_screen.value
ORANGE = Colors.orange.value
def color_to_rgb(color):
    """Convert a hex string or colour.Color to a float rgb array.

    Raises:
        ValueError: if ``color`` is neither a str nor a Color.
    """
    if isinstance(color, str):
        return hex_to_rgb(color)
    elif isinstance(color, Color):
        return np.array(color.get_rgb())
    else:
        raise ValueError("Invalid color type")
def color_to_rgba(color, alpha=1):
    """Convert a color to a 4-element rgba array with the given alpha."""
    return np.array([*color_to_rgb(color), alpha])
def rgb_to_color(rgb):
    """Convert an rgb triple (floats in [0, 1]) to a Color, falling back to white.

    Bug fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; only the exceptions Color raises for malformed
    input are caught now.
    """
    try:
        return Color(rgb=rgb)
    except (ValueError, TypeError):
        return Color(WHITE)
def rgba_to_color(rgba):
    """Convert an rgba sequence to a Color, discarding the alpha component."""
    return rgb_to_color(rgba[:3])
def rgb_to_hex(rgb):
    """Format an rgb triple of floats in [0, 1] as a lowercase '#rrggbb' string."""
    channels = ["{:02x}".format(int(255 * channel)) for channel in rgb]
    return "#" + "".join(channels)
def hex_to_rgb(hex_code):
    """Parse '#rgb' or '#rrggbb' into a float array of channels in [0, 1]."""
    digits = hex_code[1:]
    # Expand the short 3-digit form by doubling each digit.
    if len(digits) == 3:
        digits = "".join(c + c for c in digits)
    channels = [int(digits[i : i + 2], 16) / 255 for i in (0, 2, 4)]
    return np.array(channels)
def invert_color(color):
    """Return the color with each rgb channel replaced by 1 - channel."""
    return rgb_to_color(1.0 - color_to_rgb(color))
def color_to_int_rgb(color):
    """Convert a color to integer rgb channels in [0, 255] (uint8)."""
    return (255 * color_to_rgb(color)).astype("uint8")
def color_to_int_rgba(color, opacity=1.0):
    """Convert a color and an opacity in [0, 1] to integer rgba in [0, 255]."""
    alpha = int(255 * opacity)
    return np.append(color_to_int_rgb(color), alpha)
def color_gradient(reference_colors, length_of_output):
    """Sample ``length_of_output`` colors evenly along the piecewise-linear
    gradient through ``reference_colors``.

    NOTE(review): when ``length_of_output`` is 0 this returns a single color,
    not a list -- confirm callers expect that asymmetry.
    """
    if length_of_output == 0:
        return reference_colors[0]
    rgbs = list(map(color_to_rgb, reference_colors))
    # Fractional positions along the chain of reference colors.
    alphas = np.linspace(0, (len(rgbs) - 1), length_of_output)
    floors = alphas.astype("int")
    alphas_mod1 = alphas % 1
    # End edge case
    alphas_mod1[-1] = 1
    floors[-1] = len(rgbs) - 2
    return [
        rgb_to_color(interpolate(rgbs[i], rgbs[i + 1], alpha))
        for i, alpha in zip(floors, alphas_mod1)
    ]
def interpolate_color(color1, color2, alpha):
    """Linearly interpolate between two colors in rgb space (alpha in [0, 1])."""
    rgb = interpolate(color_to_rgb(color1), color_to_rgb(color2), alpha)
    return rgb_to_color(rgb)
def average_color(*colors):
    """Return the color whose rgb is the component-wise mean of ``colors``."""
    rgbs = np.array(list(map(color_to_rgb, colors)))
    # ndarray.mean(axis=0) is the direct, idiomatic equivalent of
    # np.apply_along_axis(np.mean, 0, rgbs).
    mean_rgb = rgbs.mean(axis=0)
    return rgb_to_color(mean_rgb)
def random_bright_color():
    """Pick a random palette color and lighten it halfway towards white."""
    color = random_color()
    curr_rgb = color_to_rgb(color)
    new_rgb = interpolate(curr_rgb, np.ones(len(curr_rgb)), 0.5)
    return Color(rgb=new_rgb)
def random_color():
    """Return the hex string of a random pre-defined palette color."""
    return random.choice([c.value for c in list(Colors)])
def get_shaded_rgb(rgb, point, unit_normal_vect, light_source):
    """Brighten or darken ``rgb`` by how directly the surface faces the light.

    Bug fix: the original clipped a fresh ``rgb + factor`` temporary and threw
    it away, so the returned array was never clipped to [0, 1]; the returned
    ``result`` is clipped in place now.
    """
    to_sun = normalize(light_source - point)
    # Cubing sharpens the highlight around surfaces facing the light head-on.
    factor = 0.5 * np.dot(unit_normal_vect, to_sun) ** 3
    if factor < 0:
        # Soften the darkening on faces turned away from the light.
        factor *= 0.5
    result = rgb + factor
    clip_in_place(result, 0, 1)
    return result
| [
"numpy.dot",
"colour.Color",
"numpy.apply_along_axis"
] | [((7681, 7718), 'numpy.apply_along_axis', 'np.apply_along_axis', (['np.mean', '(0)', 'rgbs'], {}), '(np.mean, 0, rgbs)\n', (7700, 7718), True, 'import numpy as np\n'), ((7920, 7938), 'colour.Color', 'Color', ([], {'rgb': 'new_rgb'}), '(rgb=new_rgb)\n', (7925, 7938), False, 'from colour import Color\n'), ((6213, 6227), 'colour.Color', 'Color', ([], {'rgb': 'rgb'}), '(rgb=rgb)\n', (6218, 6227), False, 'from colour import Color\n'), ((6255, 6267), 'colour.Color', 'Color', (['WHITE'], {}), '(WHITE)\n', (6260, 6267), False, 'from colour import Color\n'), ((8149, 8181), 'numpy.dot', 'np.dot', (['unit_normal_vect', 'to_sun'], {}), '(unit_normal_vect, to_sun)\n', (8155, 8181), True, 'import numpy as np\n')] |
import numpy as np
from utils.rbo import rbo as rbo_utils
from itertools import combinations
def proportion_common_words(topics, topk=10):
    """
    Compute the proportion of top-k words shared between topics
    (1 minus the fraction of unique words).

    Parameters
    ----------
    topics: a list of lists of words
    topk: top k words on which the topic diversity will be computed

    Returns
    -------
    pcw : proportion of common words
    """
    if topk > len(topics[0]):
        raise Exception('Words in topics are less than ' + str(topk))
    seen = set()
    for topic in topics:
        seen.update(topic[:topk])
    return 1 - len(seen) / (topk * len(topics))
def rbo(topics, weight=0.9, topk=10):
    """
    compute rank-biased overlap

    Parameters
    ----------
    topics: a list of lists of words
    topk: top k words on which the topic diversity
          will be computed
    weight: p (float), default 1.0: Weight of each
            agreement at depth d:p**(d-1). When set
            to 1.0, there is no weight, the rbo returns
            to average overlap.

    Returns
    -------
    rbo : score of the rank biased overlap over the topics
    """
    if topk > len(topics[0]):
        raise Exception('Words in topics are less than topk')
    else:
        collect = []
        for list1, list2 in combinations(topics, 2):
            # rbo operates on ranked lists of ids, so map words to integers.
            word2index = get_word2index(list1, list2)
            indexed_list1 = [word2index[word] for word in list1]
            indexed_list2 = [word2index[word] for word in list2]
            # rbo_utils returns a triple; index 2 is the extrapolated score.
            rbo_val = rbo_utils(indexed_list1[:topk], indexed_list2[:topk], p=weight)[2]
            collect.append(rbo_val)
        return np.mean(collect)
def pairwise_jaccard_similarity(topics, topk=10):
    """Average pairwise Jaccard similarity of the top-k words over all topic pairs."""
    total = 0.0
    pairs = 0
    for first, second in combinations(topics, 2):
        top_first = first[:topk]
        top_second = second[:topk]
        inter = len(set(top_first).intersection(top_second))
        union = len(top_first) + len(top_second) - inter
        pairs += 1
        total += float(inter) / union
    return total / pairs
def get_word2index(list1, list2):
    """Map each word appearing in either list to a unique integer index."""
    vocabulary = set(list1) | set(list2)
    return {word: index for index, word in enumerate(vocabulary)}
| [
"itertools.combinations",
"numpy.mean",
"utils.rbo.rbo"
] | [((1832, 1855), 'itertools.combinations', 'combinations', (['topics', '(2)'], {}), '(topics, 2)\n', (1844, 1855), False, 'from itertools import combinations\n'), ((1364, 1387), 'itertools.combinations', 'combinations', (['topics', '(2)'], {}), '(topics, 2)\n', (1376, 1387), False, 'from itertools import combinations\n'), ((1713, 1729), 'numpy.mean', 'np.mean', (['collect'], {}), '(collect)\n', (1720, 1729), True, 'import numpy as np\n'), ((1595, 1658), 'utils.rbo.rbo', 'rbo_utils', (['indexed_list1[:topk]', 'indexed_list2[:topk]'], {'p': 'weight'}), '(indexed_list1[:topk], indexed_list2[:topk], p=weight)\n', (1604, 1658), True, 'from utils.rbo import rbo as rbo_utils\n')] |
import sys
sys.path.append('../')
from config import DATA_PATH
import pylangacq
from aux import load_audio
from aux_evaluate import first_last, get_bounds, filtered_overlapping_indexes, prepare_mono_for_forward
import os
import numpy as np
from spectral_cluster import get_affinity_matrix, cluster_affinity, adjust_labels_to_signal, arr_to_areas
from pyannote.core import Segment, Timeline, Annotation
from pyannote.metrics.diarization import DiarizationErrorRate
def get_speakers_distribution(stamps):
    """Count samples labelled speaker A, speaker B and silence in a chararray."""
    return {
        label: stamps.count(code).sum()
        for label, code in (('A', b'A'), ('B', b'B'), ('silence', b'S'))
    }
def first_last(bool_arr):
    """Return the indexes of the first and last True entries of a boolean array."""
    true_positions = np.where(bool_arr)[0]
    return true_positions[0], true_positions[-1]
def get_bounds(b1, b2):
    """Union of two (start, end) index ranges: smallest start, largest end."""
    return np.min([b1[0], b2[0]]), np.max([b1[1], b2[1]])
def filtered_overlapping_indexes(labels1, labels2):
    """Indexes where the two boolean speaker tracks do NOT both claim speech."""
    assert labels1.shape == labels2.shape
    overlap = np.logical_and(labels1, labels2)
    return np.where(~overlap)
class ChaTool:
    """Reads a CHAT (.cha) transcript plus its mono audio and turns the
    per-utterance time marks into sample-level speaker labels."""

    def __init__(self, *, cha_path_file, wav_path_file, sampling_rate):
        self.sampling_rate = sampling_rate
        self.wav_filepath = wav_path_file
        _, self.mono_audio = load_audio(self.wav_filepath, sampling_rate, mono=True)
        reader = pylangacq.Reader.from_files([cha_path_file])
        self.utterances = reader.utterances()

    def get_timestamps(self):
        """Boolean per-sample speech masks for participants A and B."""
        time_stamps_A = [utt.time_marks for utt in self.utterances if utt.participant=='A' and utt.time_marks is not None]
        time_stamps_B = [utt.time_marks for utt in self.utterances if utt.participant=='B' and utt.time_marks is not None]
        time_stamps_a = list(map(np.array, time_stamps_A))
        time_stamps_b = list(map(np.array, time_stamps_B))
        # Bug fix: np.bool was removed in NumPy 1.24 -- use the builtin bool.
        stamps_a = np.zeros_like(self.mono_audio, dtype=bool)
        stamps_b = np.zeros_like(self.mono_audio, dtype=bool)
        # CHAT time marks are in milliseconds; convert to sample indexes.
        coeff = self.sampling_rate/1000
        for s in time_stamps_a:
            stamps_a[int(s[0]*coeff): int(s[1]*coeff)] = True
        for s in time_stamps_b:
            stamps_b[int(s[0]*coeff): int(s[1]*coeff)] = True
        return stamps_a, stamps_b

    def stamps_and_mono(self):
        """Char labels ('A'/'B'/'S') and audio, trimmed to the overall speech
        span and with overlapping-speech samples removed."""
        stamps_a, stamps_b = self.get_timestamps()
        # Trim leading/trailing silence outside both speakers' activity.
        bottom, top = get_bounds( first_last(stamps_a), first_last(stamps_b) )
        stamps_a_bounded = stamps_a[bottom:top]
        stamps_b_bounded = stamps_b[bottom:top]
        mono_bounded = self.mono_audio[bottom:top]
        # Drop samples where both speakers talk at once.
        overlap_filtered = filtered_overlapping_indexes(stamps_a_bounded, stamps_b_bounded)
        stamps_a_filtered = stamps_a_bounded[overlap_filtered]
        stamps_b_filtered = stamps_b_bounded[overlap_filtered]
        mono_filtered = mono_bounded[overlap_filtered]
        stamps_chars = np.chararray(stamps_a_filtered.shape)
        stamps_chars.fill('S')
        stamps_chars[np.where(stamps_a_filtered)] = 'A'
        stamps_chars[np.where(stamps_b_filtered)] = 'B'
        assert np.logical_and(stamps_a_filtered, stamps_b_filtered).any()==False,'intersection of speech '
        return stamps_chars, mono_filtered
class CharLabels:
    """Expand clustered speaker labels into a per-sample char array.

    ``labels`` is a 2 x n boolean array (one row per speaker); the result
    covers ``destination_size`` samples with 'A'/'B' over speech regions
    ('S' elsewhere).
    """

    def __init__(self, labels, destination_size, speech_indexes):
        tags = np.chararray(labels.shape[1])
        tags[np.where(labels[0])] = 'A'
        tags[np.where(labels[1])] = 'B'
        # stretch the clustered labels over the speech samples only
        stretched = adjust_labels_to_signal(tags, np.sum(speech_indexes))
        out = np.chararray(destination_size)
        out[np.where(speech_indexes)] = stretched
        out[np.where(~speech_indexes)] = 'S'
        self.char_labels = out
class Hypothesis:
    """Wrap per-sample diarisation labels as a pyannote Annotation ('A'/'B')."""

    def __init__(self, char_labels):
        annotation = Annotation()
        # pyannote.metrics does not require label names to match the reference
        segments_a = arr_to_areas(char_labels, 'A')
        segments_b = arr_to_areas(char_labels, 'B')
        for seg_a, seg_b in zip(segments_a, segments_b):
            annotation[Segment(seg_a[0], seg_a[1])] = 'A'
            annotation[Segment(seg_b[0], seg_b[1])] = 'B'
        self.hypothesis = annotation
class Reference:
    """Wrap ground-truth per-sample labels as a pyannote Annotation ('A'/'B')."""

    def __init__(self, char_stamps):
        # The original constructed two Annotation objects and discarded the
        # first; a single one is sufficient (behaviour unchanged).
        reference = Annotation()
        for elem in arr_to_areas(char_stamps, 'A'):
            reference[Segment(elem[0], elem[1])] = 'A'
        for elem in arr_to_areas(char_stamps, 'B'):
            reference[Segment(elem[0], elem[1])] = 'B'
        self.reference = reference
| [
"pyannote.core.Segment",
"numpy.logical_and",
"numpy.where",
"spectral_cluster.arr_to_areas",
"numpy.chararray",
"numpy.zeros_like",
"numpy.max",
"pylangacq.Reader.from_files",
"numpy.sum",
"aux.load_audio",
"numpy.min",
"aux_evaluate.filtered_overlapping_indexes",
"aux_evaluate.first_last",... | [((11, 33), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (26, 33), False, 'import sys\n'), ((698, 716), 'numpy.where', 'np.where', (['bool_arr'], {}), '(bool_arr)\n', (706, 716), True, 'import numpy as np\n'), ((792, 814), 'numpy.min', 'np.min', (['[b1[0], b2[0]]'], {}), '([b1[0], b2[0]])\n', (798, 814), True, 'import numpy as np\n'), ((825, 847), 'numpy.max', 'np.max', (['[b1[1], b2[1]]'], {}), '([b1[1], b2[1]])\n', (831, 847), True, 'import numpy as np\n'), ((1243, 1298), 'aux.load_audio', 'load_audio', (['self.wav_filepath', 'sampling_rate'], {'mono': '(True)'}), '(self.wav_filepath, sampling_rate, mono=True)\n', (1253, 1298), False, 'from aux import load_audio\n'), ((1316, 1360), 'pylangacq.Reader.from_files', 'pylangacq.Reader.from_files', (['[cha_path_file]'], {}), '([cha_path_file])\n', (1343, 1360), False, 'import pylangacq\n'), ((1853, 1898), 'numpy.zeros_like', 'np.zeros_like', (['self.mono_audio'], {'dtype': 'np.bool'}), '(self.mono_audio, dtype=np.bool)\n', (1866, 1898), True, 'import numpy as np\n'), ((1918, 1963), 'numpy.zeros_like', 'np.zeros_like', (['self.mono_audio'], {'dtype': 'np.bool'}), '(self.mono_audio, dtype=np.bool)\n', (1931, 1963), True, 'import numpy as np\n'), ((2616, 2680), 'aux_evaluate.filtered_overlapping_indexes', 'filtered_overlapping_indexes', (['stamps_a_bounded', 'stamps_b_bounded'], {}), '(stamps_a_bounded, stamps_b_bounded)\n', (2644, 2680), False, 'from aux_evaluate import first_last, get_bounds, filtered_overlapping_indexes, prepare_mono_for_forward\n'), ((2912, 2949), 'numpy.chararray', 'np.chararray', (['stamps_a_filtered.shape'], {}), '(stamps_a_filtered.shape)\n', (2924, 2949), True, 'import numpy as np\n'), ((3356, 3385), 'numpy.chararray', 'np.chararray', (['labels.shape[1]'], {}), '(labels.shape[1])\n', (3368, 3385), True, 'import numpy as np\n'), ((3593, 3623), 'numpy.chararray', 'np.chararray', (['destination_size'], {}), '(destination_size)\n', (3605, 
3623), True, 'import numpy as np\n'), ((3855, 3867), 'pyannote.core.Annotation', 'Annotation', ([], {}), '()\n', (3865, 3867), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((3970, 4000), 'spectral_cluster.arr_to_areas', 'arr_to_areas', (['char_labels', '"""A"""'], {}), "(char_labels, 'A')\n", (3982, 4000), False, 'from spectral_cluster import get_affinity_matrix, cluster_affinity, adjust_labels_to_signal, arr_to_areas\n'), ((4030, 4060), 'spectral_cluster.arr_to_areas', 'arr_to_areas', (['char_labels', '"""B"""'], {}), "(char_labels, 'B')\n", (4042, 4060), False, 'from spectral_cluster import get_affinity_matrix, cluster_affinity, adjust_labels_to_signal, arr_to_areas\n'), ((4373, 4385), 'pyannote.core.Annotation', 'Annotation', ([], {}), '()\n', (4383, 4385), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((4404, 4434), 'spectral_cluster.arr_to_areas', 'arr_to_areas', (['char_stamps', '"""A"""'], {}), "(char_stamps, 'A')\n", (4416, 4434), False, 'from spectral_cluster import get_affinity_matrix, cluster_affinity, adjust_labels_to_signal, arr_to_areas\n'), ((4453, 4483), 'spectral_cluster.arr_to_areas', 'arr_to_areas', (['char_stamps', '"""B"""'], {}), "(char_stamps, 'B')\n", (4465, 4483), False, 'from spectral_cluster import get_affinity_matrix, cluster_affinity, adjust_labels_to_signal, arr_to_areas\n'), ((4505, 4517), 'pyannote.core.Annotation', 'Annotation', ([], {}), '()\n', (4515, 4517), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((1006, 1038), 'numpy.logical_and', 'np.logical_and', (['labels1', 'labels2'], {}), '(labels1, labels2)\n', (1020, 1038), True, 'import numpy as np\n'), ((2379, 2399), 'aux_evaluate.first_last', 'first_last', (['stamps_a'], {}), '(stamps_a)\n', (2389, 2399), False, 'from aux_evaluate import first_last, get_bounds, filtered_overlapping_indexes, prepare_mono_for_forward\n'), ((2401, 2421), 'aux_evaluate.first_last', 'first_last', (['stamps_b'], {}), '(stamps_b)\n', 
(2411, 2421), False, 'from aux_evaluate import first_last, get_bounds, filtered_overlapping_indexes, prepare_mono_for_forward\n'), ((3002, 3029), 'numpy.where', 'np.where', (['stamps_a_filtered'], {}), '(stamps_a_filtered)\n', (3010, 3029), True, 'import numpy as np\n'), ((3058, 3085), 'numpy.where', 'np.where', (['stamps_b_filtered'], {}), '(stamps_b_filtered)\n', (3066, 3085), True, 'import numpy as np\n'), ((3405, 3424), 'numpy.where', 'np.where', (['labels[0]'], {}), '(labels[0])\n', (3413, 3424), True, 'import numpy as np\n'), ((3451, 3470), 'numpy.where', 'np.where', (['labels[1]'], {}), '(labels[1])\n', (3459, 3470), True, 'import numpy as np\n'), ((3543, 3565), 'numpy.sum', 'np.sum', (['speech_indexes'], {}), '(speech_indexes)\n', (3549, 3565), True, 'import numpy as np\n'), ((3648, 3672), 'numpy.where', 'np.where', (['speech_indexes'], {}), '(speech_indexes)\n', (3656, 3672), True, 'import numpy as np\n'), ((3710, 3735), 'numpy.where', 'np.where', (['(~speech_indexes)'], {}), '(~speech_indexes)\n', (3718, 3735), True, 'import numpy as np\n'), ((4161, 4190), 'pyannote.core.Segment', 'Segment', (['elem_a[0]', 'elem_a[1]'], {}), '(elem_a[0], elem_a[1])\n', (4168, 4190), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((4221, 4250), 'pyannote.core.Segment', 'Segment', (['elem_b[0]', 'elem_b[1]'], {}), '(elem_b[0], elem_b[1])\n', (4228, 4250), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((4570, 4595), 'pyannote.core.Segment', 'Segment', (['elem[0]', 'elem[1]'], {}), '(elem[0], elem[1])\n', (4577, 4595), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((4654, 4679), 'pyannote.core.Segment', 'Segment', (['elem[0]', 'elem[1]'], {}), '(elem[0], elem[1])\n', (4661, 4679), False, 'from pyannote.core import Segment, Timeline, Annotation\n'), ((3111, 3163), 'numpy.logical_and', 'np.logical_and', (['stamps_a_filtered', 'stamps_b_filtered'], {}), '(stamps_a_filtered, stamps_b_filtered)\n', (3125, 3163), 
True, 'import numpy as np\n')] |
#! /usr/bin/env python
# Mathematica nb from Alex & Laurent
# <EMAIL> major reorg as LG++ 2018 01
# python3 required (int( (len(coeffs) -1)/2 )) because of float int/int result change from python2
import numpy as np
import scipy.special
import numpy.linalg as linalg
import sys
from scipy.special import comb
import os, pickle
from uncertainties import unumpy # pip install if you need
# Length units, expressed in metres.
m = 1.0            # metre (base unit)
mm = 1.0e-3 * m    # millimetre
um = 1.0e-6 * m    # micrometre (micron)
def scaling(img, photons):  # RENAME this function
    """Return the factor that rescales a perfect PSF to ``photons`` total flux.

    img     -- perfect PSF image whose total flux is measured
    photons -- desired number of photons (total flux in the data)
    """
    flux = np.sum(img)
    print("total", flux)
    return photons / flux
def matrix_operations(img, model, flux = None, verbose=False, linfit=False, dqm=None):
# meta-question: why & when do we use linfit?
# least squares matrix operations to solve A x = b, where A is the model,
# b is the data (image), and x is the coefficient vector we are solving for.
# In 2-D data x = inv(At.A).(At.b)
#
# img 2d array of image data
# dqm 2d bool array of bad pixel locations (same shape as 2d img), or None (for all-good data)
print("leastsqnrm.matrix_operations() - equally-weighted")
flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1])
flatdqm = dqm.reshape(np.shape(img)[0] * np.shape(img)[1])
if verbose:
print(f'fringefitting.leastsqnrm.matrix_operations(): ', end='')
print(f'\n\timg {img.shape:} \n\tdqm {dqm.shape:}', end='')
print(f'\n\tL x W = {img.shape[0]:d} x {img.shape[1]:d} = {img.shape[0] * img.shape[1]:d}', end='')
print(f'\n\tflatimg {flatimg.shape:}', end='')
print(f'\n\tflatdqm {flatdqm.shape:}', end='')
# Originally Alex had nans coding bad pixels in the image.
# Anand: re-use the nan terminology code but driven by bad pixel frame
# nanlist shoud get renamed eg donotuselist
if verbose: print('\n\ttype(dqm)', type(dqm), end='')
if dqm is not None: nanlist = np.where(flatdqm==True) # where DO_NOT_USE up.
else: nanlist = (np.array(()), ) # shouldn't occur w/MAST JWST data
if verbose:
print(f'\n\ttype(nanlist) {type(nanlist):}, len={len(nanlist):}', end='')
print(f'\n\tnumber of nanlist pixels: {len(nanlist[0]):d} items', end='')
print(f'\n\t{len(nanlist[0]):d} DO_NOT_USE pixels found in data slice',
end='')
else:
print(f'\t{len(nanlist[0]):d} DO_NOT_USE pixels found in data slice')
flatimg = np.delete(flatimg, nanlist)
if verbose: print(f'\n\tflatimg {flatimg.shape:} after deleting {len(nanlist[0]):d}',
end='')
if flux is not None:
flatimg = flux * flatimg / flatimg.sum()
# A
flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1],
np.shape(model)[2])
flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
if verbose:
print(f'\n\tflatmodel_nan {flatmodel_nan.shape:}', end='')
print(f'\n\tflatmodel {flatmodel.shape:}', end='')
print(f'\n\tdifference {flatmodel_nan.shape[0] - flatmodel.shape[0]:}', end='')
print()
print("flat model dimensions ", np.shape(flatmodel))
print("flat image dimensions ", np.shape(flatimg))
for fringe in range(np.shape(model)[2]):
flatmodel[:,fringe] = np.delete(flatmodel_nan[:,fringe], nanlist)
# At (A transpose)
flatmodeltransp = flatmodel.transpose()
# At.A (makes square matrix)
modelproduct = np.dot(flatmodeltransp, flatmodel)
# At.b
data_vector = np.dot(flatmodeltransp, flatimg)
# inv(At.A)
inverse = linalg.inv(modelproduct)
cond = np.linalg.cond(inverse)
x = np.dot(inverse, data_vector)
res = np.dot(flatmodel, x) - flatimg
# put bad pixels back
naninsert = nanlist[0] - np.arange(len(nanlist[0]))
# calculate residuals with fixed but unused bad pixels as nans
res = np.insert(res, naninsert, np.nan)
res = res.reshape(img.shape[0], img.shape[1])
if verbose:
print('model flux', flux)
print('data flux', flatimg.sum())
print("flat model dimensions ", np.shape(flatmodel))
print("model transpose dimensions ", np.shape(flatmodeltransp))
print("flat image dimensions ", np.shape(flatimg))
print("transpose * image data dimensions", np.shape(data_vector))
print("flat img * transpose dimensions", np.shape(inverse))
if linfit:
try:
from linearfit import linearfit
# dependent variables
M = np.mat(flatimg)
# photon noise
noise = np.sqrt(np.abs(flatimg))
# this sets the weights of pixels fulfilling condition to zero
weights = np.where(np.abs(flatimg)<=1.0, 0.0, 1.0/(noise**2))
# uniform weight
wy = weights
S = np.mat(np.diag(wy));
# matrix of independent variables
C = np.mat(flatmodeltransp)
# initialize object
result = linearfit.LinearFit(M,S,C)
# do the fit
result.fit()
# delete inverse_covariance_matrix to reduce size of pickled file
result.inverse_covariance_matrix = []
linfit_result = result
print("Returned linearfit result")
except ImportError:
linfit_result = None
# if verbose:
print("linearfit module not imported, no covariances saved.")
else:
linfit_result = None
print("linearfit module not imported, no covariances saved.")
return x, res, cond, linfit_result
#######################################################################
def weighted_operations(img, model, verbose=False, dqm=None):
# return x, res, condition_number (None=>no condition number yet), singvals
# x: solution vector
# res: residuals array, nan-flagged for bad dq values?
# cond: condition number not calculateds (no inversion done here, so not available)
# singvals: singular values returned by the SVD solution for the parameters
#
# meta-question: why & when do we use linfit? I removed it here - anand 2022 Jan
# least squares matrix operations to solve A x = b, where
# A is the model,
# b is the data (image),
# x is the coefficient vector we are solving for.
#
# Solution 1: equal weighting of data (matrix_operations()).
# x = inv(At.A).(At.b)
#
# Solution 2: weighting data by Poisson variance (weighted_operations())
# x = inv(At.W.A).(At.W.b)
# where W is a diagonal matrix of weights w_i,
# weighting each data point i by the inverse of its variance:
# w_i = 1 / sigma_i^2
# For photon noise, the data, i.e. the image values b_i have variance
# proportional to b_i with an e.g. ADU to electrons coonversion factor.
# If this factor is the same for all pixels, we do not need to include
# it here (is that really true? Yes I think so because we're not
# normalizing wts here, just ascribing rel wts.).
#
# Possibly replace or campare with a MAD minimization using fast simplex
# https://theoryl1.wordpress.com/2016/08/03/solve-weighted-least-squares-with-numpy/
# Solve for x in Ax = b
#
# np.set_printoptions(formatter={'float': lambda x: '{:+.1e}'.format(x)}, linewidth=80)
#
# Ax = b
# b: data vector nd long; nd=5
# A: model matrix; np x nd matrix 4 x 5: np=4 parameters, nd=5 data points.
# x: parameter, vector np=4 long, unknown
#
# A=np.array([[3,1,4,2],[2,7,1,2],[1,6,1,8],[6,1,8,2],[1,4,1,4]])
# print("A:", A.shape)
# b = np.array([1.2,1.3,1.4,1.5,1.6])
# print("b:", b.shape)
# w = np.array([1,2,3,4,5])
# print("w:", w.shape)
# Aw = A * np.sqrt(w[:,np.newaxis])
# print("Aw:", Aw.shape)
# bw = w * np.sqrt(w)
# x, r, rank, s = np.linalg.lstsq(Aw, bw, rcond=None)
# print("x.shape:", x.shape)
# print("x:", x)
# print("r:", r)
# print("rank:", rank)
# print("s:", s)
# Also a good summary at:
# https://math.stackexchange.com/questions/3094925/weighted-least-squares
# Remove not-to-be-fit data from the flattened "img" data vector
flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1])
flatdqm = dqm.reshape(np.shape(img)[0] * np.shape(img)[1])
if dqm is not None: nanlist = np.where(flatdqm==True) # where DO_NOT_USE up.
else: nanlist = (np.array(()), ) # shouldn't occur w/MAST JWST data
# see original linearfit https://github.com/agreenbaum/ImPlaneIA:
# agreenbaum committed on May 21, 2017 1 parent 3e0fb8b
# commit bf02eb52c5813cb5d77036174a7caba703f9d366
#
flatimg = np.delete(flatimg, nanlist) # DATA values
# photon noise variance - proportional to ADU
# (for roughly uniform adu2electron factor)
variance = np.abs(flatimg)
# this resets the weights of pixels with negative or unity values to zero
# we ignore data with unity or lower values - weight it not-at-all..
weights = np.where(flatimg <= 1.0, 0.0, 1.0/np.sqrt(variance)) # anand 2022 Jan
print("fringefitting.leastsqnrm.weighted_operations:", len(nanlist[0]),
"bad pixels skipped in weighted fringefitter")
# A - but delete all pixels flagged by dq array
flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1],
np.shape(model)[2])
flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
for fringe in range(np.shape(model)[2]):
flatmodel[:,fringe] = np.delete(flatmodel_nan[:,fringe], nanlist)
print(flatmodel.shape)
# A.w
# Aw = A * np.sqrt(w[:,np.newaxis]) # w as a column vector
Aw = flatmodel * weights[:,np.newaxis]
# bw = b * np.sqrt(w)
bw = flatimg * weights
# x = np.linalg.lstsq(Aw, bw)[0]
# resids are pixel value residuals, flattened to 1d vector
x, rss, rank, singvals = np.linalg.lstsq(Aw, bw)
#inverse = linalg.inv(Atww)
#cond = np.linalg.cond(inverse)
# actual residuals in image: is this sign convention odd?
# res = np.dot(flatmodel, x) - flatimg
# changed here to data - model
res = flatimg - np.dot(flatmodel, x)
# put bad pixels back
naninsert = nanlist[0] - np.arange(len(nanlist[0]))
# calculate residuals with fixed but unused bad pixels as nans
res = np.insert(res, naninsert, np.nan)
res = res.reshape(img.shape[0], img.shape[1])
cond = None
return x, res, cond, singvals # no condition number yet...
def deltapistons(pistons):
    """Return pairwise piston differences pistons[i] - pistons[j], i < j.

    Used for comparison, since only delta pistons are measured in the fit.
    """
    N = len(pistons)
    # build the list of hole-index pairs (i, j) with i < j
    alist = []
    for i in range(N - 1):
        for j in range(N - 1):
            if j + i + 1 < N:
                alist = np.append(alist, i)
                alist = np.append(alist, j + i + 1)
    # BUGFIX: len(alist)/2 is a float in Python 3 and reshape rejects
    # non-integer shapes; use floor division.
    alist = alist.reshape(len(alist) // 2, 2)
    delta = np.zeros(len(alist))
    for q, pair in enumerate(alist):
        # BUGFIX: np.append produced float indices; cast to int for indexing
        delta[q] = pistons[int(pair[0])] - pistons[int(pair[1])]
    return delta
def tan2visibilities(coeffs, verbose=False):
    """Convert fitted cos/sin coefficients into fringe amplitudes and phases.

    Each fringe contributes A*cos(dphi)*cos(kx) + A*sin(dphi)*sin(kx), so
    with a' = coeffs[2q+1] and b' = coeffs[2q+2]:
        amplitude = sqrt(a'^2 + b'^2),  phase = arctan2(b', a')  [radians]
    Values carrying `uncertainties` objects are propagated with unumpy.
    """
    nfringes = int((len(coeffs) - 1) / 2)
    if type(coeffs[0]).__module__ != 'uncertainties.core':
        # plain numbers: loop over the fringes
        delta = np.zeros(nfringes)
        amp = np.zeros(nfringes)
        for q in range(nfringes):
            costerm = coeffs[2 * q + 1]
            sinterm = coeffs[2 * q + 2]
            delta[q] = np.arctan2(sinterm, costerm)
            amp[q] = np.sqrt(sinterm ** 2 + costerm ** 2)
        if verbose:
            print("shape coeffs", np.shape(coeffs))
            print("shape delta", np.shape(delta))
        # fringe amplitude & phase
        return amp, delta
    else:
        # uncertainties present: vectorised propagation through unumpy
        qrange = np.arange(nfringes)
        fringephase = unumpy.arctan2(coeffs[2 * qrange + 2], coeffs[2 * qrange + 1])
        fringeamp = unumpy.sqrt(coeffs[2 * qrange + 2] ** 2 + coeffs[2 * qrange + 1] ** 2)
        return fringeamp, fringephase
def fixeddeltapistons(coeffs, verbose=False):
    """Return per-fringe piston estimates arcsin((a' + b') / 2) / (2*pi)."""
    nfringes = int((len(coeffs) - 1) / 2)
    delta = np.zeros(nfringes)
    for q in range(nfringes):
        mean_coeff = (coeffs[2 * q + 1] + coeffs[2 * q + 2]) / 2
        delta[q] = np.arcsin(mean_coeff) / (2.0 * np.pi)
    if verbose:
        print("shape coeffs", np.shape(coeffs))
        print("shape delta", np.shape(delta))
    return delta
def populate_antisymmphasearray(deltaps, N=7):
    """Arrange N*(N-1)/2 delta-phases into an antisymmetric N x N matrix.

    Row h receives the next (N-1-h) entries of ``deltaps`` above the
    diagonal; the lower triangle is the negated transpose.  Supports the
    `uncertainties` package.
    """
    if type(deltaps[0]).__module__ != 'uncertainties.core':
        arr = np.zeros((N, N))
    else:
        arr = unumpy.uarray(np.zeros((N, N)), np.zeros((N, N)))
    offset = 0
    for row in range(N - 1):
        rowlen = N - 1 - row
        arr[row, row + 1:] = deltaps[offset:offset + rowlen]
        offset += rowlen
    return arr - arr.T
def populate_symmamparray(amps, N=7):
    """Arrange N*(N-1)/2 fringe amplitudes into a symmetric N x N matrix.

    Row h receives the next (N-1-h) entries of ``amps`` above the diagonal;
    the lower triangle mirrors it.  Supports the `uncertainties` package.
    """
    if type(amps[0]).__module__ != 'uncertainties.core':
        arr = np.zeros((N, N))
    else:
        arr = unumpy.uarray(np.zeros((N, N)), np.zeros((N, N)))
    offset = 0
    for row in range(N - 1):
        rowlen = N - 1 - row
        arr[row, row + 1:] = amps[offset:offset + rowlen]
        offset += rowlen
    return arr + arr.T
def phases_and_amplitudes(solution_coefficients, N=7):
    """From fit coefficients, return fringe phases, amplitudes, closure
    phases and closure amplitudes.

    The coefficients are normalised by the total-intensity term
    solution_coefficients[0] before conversion.
    """
    nterms = len(solution_coefficients)
    # normalise by intensity
    soln = np.array([solution_coefficients[i] / solution_coefficients[0]
                     for i in range(nterms)])
    # fringe quantities
    fringeamp, fringephase = tan2visibilities(soln)
    # closure phases
    if type(solution_coefficients[0]).__module__ != 'uncertainties.core':
        redundant_closure_phases = redundant_cps(np.array(fringephase), N=N)
    else:
        redundant_closure_phases, fringephasearray = redundant_cps(np.array(fringephase), N=N)
    # closure amplitudes -- BUGFIX: the original passed the fringe *phases*
    # to return_CAs, whose parameter is the fringe amplitudes.
    redundant_closure_amplitudes = return_CAs(np.array(fringeamp), N=N)
    return fringephase, fringeamp, redundant_closure_phases, redundant_closure_amplitudes
def redundant_cps(deltaps, N=7):
    """Return all comb(N, 3) closure phases from the fringe phases.

    With the `uncertainties` package the antisymmetric phase matrix is
    returned as well, and errors are propagated.
    """
    fringephasearray = populate_antisymmphasearray(deltaps, N=N)
    # BUGFIX: np.int was removed from NumPy (1.24); use the builtin int
    ncps = int(comb(N, 3))
    if type(deltaps[0]).__module__ != 'uncertainties.core':
        cps = np.zeros(ncps)
    else:
        cps = unumpy.uarray(np.zeros(ncps), np.zeros(ncps))
    nn = 0
    for kk in range(N - 2):
        for ii in range(N - kk - 2):
            for jj in range(N - kk - ii - 2):
                cps[nn + jj] = fringephasearray[kk, ii + kk + 1] \
                    + fringephasearray[ii + kk + 1, jj + ii + kk + 2] \
                    + fringephasearray[jj + ii + kk + 2, kk]
            nn = nn + jj + 1
    if type(deltaps[0]).__module__ != 'uncertainties.core':
        return cps
    else:
        return cps, fringephasearray
def closurephase(deltap, N=7):
    """Compute the (N-1)(N-2)/2 closure phases of an N-hole mask.

    Supported masks: N=7 (JWST) and N=10 (GPI).
    deltap -- the N(N-1)/2 fringe phases in standard baseline order.
    """
    # p is a triangular table of the phases, one row per starting hole.
    # BUGFIX: the original wrapped the ragged rows in np.array(), which
    # modern NumPy rejects (inhomogeneous shapes raise ValueError); a plain
    # list of slices indexes identically via p[l1][l2].
    if N == 7:
        p = [deltap[:6], deltap[6:11], deltap[11:15],
             deltap[15:18], deltap[18:20], deltap[20:]]
    elif N == 10:
        p = [deltap[:9], deltap[9:17], deltap[17:24],
             deltap[24:30], deltap[30:35], deltap[35:39],
             deltap[39:42], deltap[42:44], deltap[44:]]
    else:
        print("invalid hole number")
    # closure phases for the general N-hole mask (with p set up as above)
    cps = np.zeros(int((N - 1) * (N - 2) / 2))
    for l1 in range(N - 2):
        for l2 in range(N - 2 - l1):
            cps[int(l1 * ((N + (N - 3) - l1) / 2.0)) + l2] = \
                p[l1][0] + p[l1 + 1][l2] - p[l1][l2 + 1]
    return cps
def return_CAs(amps, N=7):
    """Return all comb(N, 4) closure amplitudes from the fringe amplitudes.

    Supports the `uncertainties` package for error propagation.
    """
    fringeamparray = populate_symmamparray(amps, N=N)
    # BUGFIX: np.int was removed from NumPy (1.24); use the builtin int
    ncas = int(comb(N, 4))
    if type(amps[0]).__module__ != 'uncertainties.core':
        CAs = np.zeros(ncas)
    else:
        CAs = unumpy.uarray(np.zeros(ncas), np.zeros(ncas))
    nn = 0
    for ii in range(N - 3):
        for jj in range(N - ii - 3):
            for kk in range(N - jj - ii - 3):
                for ll in range(N - jj - ii - kk - 3):
                    CAs[nn + ll] = fringeamparray[ii, jj + ii + 1] \
                        * fringeamparray[ll + ii + jj + kk + 3, kk + jj + ii + 2] \
                        / (fringeamparray[ii, kk + ii + jj + 2]
                           * fringeamparray[jj + ii + 1, ll + ii + jj + kk + 3])
                nn = nn + ll + 1
    return CAs
| [
"numpy.sqrt",
"numpy.linalg.cond",
"linearfit.linearfit.LinearFit",
"numpy.array",
"numpy.arctan2",
"scipy.special.comb",
"uncertainties.unumpy.arctan2",
"numpy.where",
"numpy.delete",
"numpy.dot",
"numpy.linalg.lstsq",
"uncertainties.unumpy.sqrt",
"numpy.abs",
"numpy.mat",
"numpy.shape"... | [((619, 630), 'numpy.sum', 'np.sum', (['img'], {}), '(img)\n', (625, 630), True, 'import numpy as np\n'), ((2534, 2561), 'numpy.delete', 'np.delete', (['flatimg', 'nanlist'], {}), '(flatimg, nanlist)\n', (2543, 2561), True, 'import numpy as np\n'), ((3560, 3594), 'numpy.dot', 'np.dot', (['flatmodeltransp', 'flatmodel'], {}), '(flatmodeltransp, flatmodel)\n', (3566, 3594), True, 'import numpy as np\n'), ((3624, 3656), 'numpy.dot', 'np.dot', (['flatmodeltransp', 'flatimg'], {}), '(flatmodeltransp, flatimg)\n', (3630, 3656), True, 'import numpy as np\n'), ((3687, 3711), 'numpy.linalg.inv', 'linalg.inv', (['modelproduct'], {}), '(modelproduct)\n', (3697, 3711), True, 'import numpy.linalg as linalg\n'), ((3723, 3746), 'numpy.linalg.cond', 'np.linalg.cond', (['inverse'], {}), '(inverse)\n', (3737, 3746), True, 'import numpy as np\n'), ((3756, 3784), 'numpy.dot', 'np.dot', (['inverse', 'data_vector'], {}), '(inverse, data_vector)\n', (3762, 3784), True, 'import numpy as np\n'), ((3986, 4019), 'numpy.insert', 'np.insert', (['res', 'naninsert', 'np.nan'], {}), '(res, naninsert, np.nan)\n', (3995, 4019), True, 'import numpy as np\n'), ((8874, 8901), 'numpy.delete', 'np.delete', (['flatimg', 'nanlist'], {}), '(flatimg, nanlist)\n', (8883, 8901), True, 'import numpy as np\n'), ((9032, 9047), 'numpy.abs', 'np.abs', (['flatimg'], {}), '(flatimg)\n', (9038, 9047), True, 'import numpy as np\n'), ((10110, 10133), 'numpy.linalg.lstsq', 'np.linalg.lstsq', (['Aw', 'bw'], {}), '(Aw, bw)\n', (10125, 10133), True, 'import numpy as np\n'), ((10547, 10580), 'numpy.insert', 'np.insert', (['res', 'naninsert', 'np.nan'], {}), '(res, naninsert, np.nan)\n', (10556, 10580), True, 'import numpy as np\n'), ((2019, 2044), 'numpy.where', 'np.where', (['(flatdqm == True)'], {}), '(flatdqm == True)\n', (2027, 2044), True, 'import numpy as np\n'), ((3397, 3441), 'numpy.delete', 'np.delete', (['flatmodel_nan[:, fringe]', 'nanlist'], {}), '(flatmodel_nan[:, fringe], nanlist)\n', (3406, 
3441), True, 'import numpy as np\n'), ((3795, 3815), 'numpy.dot', 'np.dot', (['flatmodel', 'x'], {}), '(flatmodel, x)\n', (3801, 3815), True, 'import numpy as np\n'), ((8547, 8572), 'numpy.where', 'np.where', (['(flatdqm == True)'], {}), '(flatdqm == True)\n', (8555, 8572), True, 'import numpy as np\n'), ((9739, 9783), 'numpy.delete', 'np.delete', (['flatmodel_nan[:, fringe]', 'nanlist'], {}), '(flatmodel_nan[:, fringe], nanlist)\n', (9748, 9783), True, 'import numpy as np\n'), ((10365, 10385), 'numpy.dot', 'np.dot', (['flatmodel', 'x'], {}), '(flatmodel, x)\n', (10371, 10385), True, 'import numpy as np\n'), ((12865, 12927), 'uncertainties.unumpy.arctan2', 'unumpy.arctan2', (['coeffs[2 * qrange + 2]', 'coeffs[2 * qrange + 1]'], {}), '(coeffs[2 * qrange + 2], coeffs[2 * qrange + 1])\n', (12879, 12927), False, 'from uncertainties import unumpy\n'), ((12940, 13010), 'uncertainties.unumpy.sqrt', 'unumpy.sqrt', (['(coeffs[2 * qrange + 2] ** 2 + coeffs[2 * qrange + 1] ** 2)'], {}), '(coeffs[2 * qrange + 2] ** 2 + coeffs[2 * qrange + 1] ** 2)\n', (12951, 13010), False, 'from uncertainties import unumpy\n'), ((13542, 13558), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (13550, 13558), True, 'import numpy as np\n'), ((14307, 14323), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (14315, 14323), True, 'import numpy as np\n'), ((15404, 15425), 'numpy.array', 'np.array', (['fringephase'], {}), '(fringephase)\n', (15412, 15425), True, 'import numpy as np\n'), ((16486, 16585), 'numpy.array', 'np.array', (['[deltap[:6], deltap[6:11], deltap[11:15], deltap[15:18], deltap[18:20],\n deltap[20:]]'], {}), '([deltap[:6], deltap[6:11], deltap[11:15], deltap[15:18], deltap[18\n :20], deltap[20:]])\n', (16494, 16585), True, 'import numpy as np\n'), ((2088, 2100), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (2096, 2100), True, 'import numpy as np\n'), ((2866, 2881), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (2874, 2881), True, 'import numpy 
as np\n'), ((3241, 3260), 'numpy.shape', 'np.shape', (['flatmodel'], {}), '(flatmodel)\n', (3249, 3260), True, 'import numpy as np\n'), ((3302, 3319), 'numpy.shape', 'np.shape', (['flatimg'], {}), '(flatimg)\n', (3310, 3319), True, 'import numpy as np\n'), ((3346, 3361), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (3354, 3361), True, 'import numpy as np\n'), ((4203, 4222), 'numpy.shape', 'np.shape', (['flatmodel'], {}), '(flatmodel)\n', (4211, 4222), True, 'import numpy as np\n'), ((4269, 4294), 'numpy.shape', 'np.shape', (['flatmodeltransp'], {}), '(flatmodeltransp)\n', (4277, 4294), True, 'import numpy as np\n'), ((4336, 4353), 'numpy.shape', 'np.shape', (['flatimg'], {}), '(flatimg)\n', (4344, 4353), True, 'import numpy as np\n'), ((4406, 4427), 'numpy.shape', 'np.shape', (['data_vector'], {}), '(data_vector)\n', (4414, 4427), True, 'import numpy as np\n'), ((4478, 4495), 'numpy.shape', 'np.shape', (['inverse'], {}), '(inverse)\n', (4486, 4495), True, 'import numpy as np\n'), ((4621, 4636), 'numpy.mat', 'np.mat', (['flatimg'], {}), '(flatimg)\n', (4627, 4636), True, 'import numpy as np\n'), ((5014, 5037), 'numpy.mat', 'np.mat', (['flatmodeltransp'], {}), '(flatmodeltransp)\n', (5020, 5037), True, 'import numpy as np\n'), ((5092, 5120), 'linearfit.linearfit.LinearFit', 'linearfit.LinearFit', (['M', 'S', 'C'], {}), '(M, S, C)\n', (5111, 5120), False, 'from linearfit import linearfit\n'), ((8616, 8628), 'numpy.array', 'np.array', (['()'], {}), '(())\n', (8624, 8628), True, 'import numpy as np\n'), ((9249, 9266), 'numpy.sqrt', 'np.sqrt', (['variance'], {}), '(variance)\n', (9256, 9266), True, 'import numpy as np\n'), ((9583, 9598), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (9591, 9598), True, 'import numpy as np\n'), ((9688, 9703), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (9696, 9703), True, 'import numpy as np\n'), ((12424, 12472), 'numpy.arctan2', 'np.arctan2', (['coeffs[2 * q + 2]', 'coeffs[2 * q + 1]'], {}), '(coeffs[2 
* q + 2], coeffs[2 * q + 1])\n', (12434, 12472), True, 'import numpy as np\n'), ((12488, 12544), 'numpy.sqrt', 'np.sqrt', (['(coeffs[2 * q + 2] ** 2 + coeffs[2 * q + 1] ** 2)'], {}), '(coeffs[2 * q + 2] ** 2 + coeffs[2 * q + 1] ** 2)\n', (12495, 12544), True, 'import numpy as np\n'), ((13213, 13267), 'numpy.arcsin', 'np.arcsin', (['((coeffs[2 * q + 1] + coeffs[2 * q + 2]) / 2)'], {}), '((coeffs[2 * q + 1] + coeffs[2 * q + 2]) / 2)\n', (13222, 13267), True, 'import numpy as np\n'), ((13320, 13336), 'numpy.shape', 'np.shape', (['coeffs'], {}), '(coeffs)\n', (13328, 13336), True, 'import numpy as np\n'), ((13367, 13382), 'numpy.shape', 'np.shape', (['delta'], {}), '(delta)\n', (13375, 13382), True, 'import numpy as np\n'), ((13609, 13625), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (13617, 13625), True, 'import numpy as np\n'), ((13625, 13641), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (13633, 13641), True, 'import numpy as np\n'), ((14372, 14388), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (14380, 14388), True, 'import numpy as np\n'), ((14388, 14404), 'numpy.zeros', 'np.zeros', (['(N, N)'], {}), '((N, N))\n', (14396, 14404), True, 'import numpy as np\n'), ((15187, 15208), 'numpy.array', 'np.array', (['fringephase'], {}), '(fringephase)\n', (15195, 15208), True, 'import numpy as np\n'), ((15292, 15313), 'numpy.array', 'np.array', (['fringephase'], {}), '(fringephase)\n', (15300, 15313), True, 'import numpy as np\n'), ((16633, 16777), 'numpy.array', 'np.array', (['[deltap[:9], deltap[9:17], deltap[17:24], deltap[24:30], deltap[30:35],\n deltap[35:39], deltap[39:42], deltap[42:44], deltap[44:]]'], {}), '([deltap[:9], deltap[9:17], deltap[17:24], deltap[24:30], deltap[30\n :35], deltap[35:39], deltap[39:42], deltap[42:44], deltap[44:]])\n', (16641, 16777), True, 'import numpy as np\n'), ((1255, 1268), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1263, 1268), True, 'import numpy as np\n'), ((1274, 1287), 
'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1282, 1287), True, 'import numpy as np\n'), ((1318, 1331), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1326, 1331), True, 'import numpy as np\n'), ((1337, 1350), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (1345, 1350), True, 'import numpy as np\n'), ((2790, 2805), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (2798, 2805), True, 'import numpy as np\n'), ((2811, 2826), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (2819, 2826), True, 'import numpy as np\n'), ((2926, 2941), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (2934, 2941), True, 'import numpy as np\n'), ((4693, 4708), 'numpy.abs', 'np.abs', (['flatimg'], {}), '(flatimg)\n', (4699, 4708), True, 'import numpy as np\n'), ((4938, 4949), 'numpy.diag', 'np.diag', (['wy'], {}), '(wy)\n', (4945, 4949), True, 'import numpy as np\n'), ((8412, 8425), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8420, 8425), True, 'import numpy as np\n'), ((8431, 8444), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8439, 8444), True, 'import numpy as np\n'), ((8475, 8488), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8483, 8488), True, 'import numpy as np\n'), ((8494, 8507), 'numpy.shape', 'np.shape', (['img'], {}), '(img)\n', (8502, 8507), True, 'import numpy as np\n'), ((9507, 9522), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (9515, 9522), True, 'import numpy as np\n'), ((9528, 9543), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (9536, 9543), True, 'import numpy as np\n'), ((9643, 9658), 'numpy.shape', 'np.shape', (['model'], {}), '(model)\n', (9651, 9658), True, 'import numpy as np\n'), ((11065, 11084), 'numpy.append', 'np.append', (['alist', 'i'], {}), '(alist, i)\n', (11074, 11084), True, 'import numpy as np\n'), ((11109, 11136), 'numpy.append', 'np.append', (['alist', '(j + i + 1)'], {}), '(alist, j + i + 1)\n', (11118, 11136), True, 'import numpy as np\n'), ((12587, 
12603), 'numpy.shape', 'np.shape', (['coeffs'], {}), '(coeffs)\n', (12595, 12603), True, 'import numpy as np\n'), ((12638, 12653), 'numpy.shape', 'np.shape', (['delta'], {}), '(delta)\n', (12646, 12653), True, 'import numpy as np\n'), ((15712, 15722), 'scipy.special.comb', 'comb', (['N', '(3)'], {}), '(N, 3)\n', (15716, 15722), False, 'from scipy.special import comb\n'), ((17395, 17405), 'scipy.special.comb', 'comb', (['N', '(4)'], {}), '(N, 4)\n', (17399, 17405), False, 'from scipy.special import comb\n'), ((4817, 4832), 'numpy.abs', 'np.abs', (['flatimg'], {}), '(flatimg)\n', (4823, 4832), True, 'import numpy as np\n'), ((15779, 15789), 'scipy.special.comb', 'comb', (['N', '(3)'], {}), '(N, 3)\n', (15783, 15789), False, 'from scipy.special import comb\n'), ((15807, 15817), 'scipy.special.comb', 'comb', (['N', '(3)'], {}), '(N, 3)\n', (15811, 15817), False, 'from scipy.special import comb\n'), ((17462, 17472), 'scipy.special.comb', 'comb', (['N', '(4)'], {}), '(N, 4)\n', (17466, 17472), False, 'from scipy.special import comb\n'), ((17490, 17500), 'scipy.special.comb', 'comb', (['N', '(4)'], {}), '(N, 4)\n', (17494, 17500), False, 'from scipy.special import comb\n')] |
# Copyright (c) 2019-2021, <NAME>, <NAME>, <NAME>, and <NAME>.
#
# Distributed under the 3-clause BSD license, see accompanying file LICENSE
# or https://github.com/scikit-hep/vector for details.
import typing
"""
.. code-block:: python
@property
Lorentz.t(self)
"""
import numpy
from vector._compute.lorentz import t2
from vector._methods import (
AzimuthalRhoPhi,
AzimuthalXY,
LongitudinalEta,
LongitudinalTheta,
LongitudinalZ,
TemporalT,
TemporalTau,
_aztype,
_flavor_of,
_from_signature,
_ltype,
_ttype,
)
def xy_z_t(lib, x, y, z, t):
return t
def xy_z_tau(lib, x, y, z, tau):
return lib.sqrt(t2.xy_z_tau(lib, x, y, z, tau))
def xy_theta_t(lib, x, y, theta, t):
return t
def xy_theta_tau(lib, x, y, theta, tau):
return lib.sqrt(t2.xy_theta_tau(lib, x, y, theta, tau))
def xy_eta_t(lib, x, y, eta, t):
return t
def xy_eta_tau(lib, x, y, eta, tau):
return lib.sqrt(t2.xy_eta_tau(lib, x, y, eta, tau))
def rhophi_z_t(lib, rho, phi, z, t):
return t
def rhophi_z_tau(lib, rho, phi, z, tau):
return lib.sqrt(t2.rhophi_z_tau(lib, rho, phi, z, tau))
def rhophi_theta_t(lib, rho, phi, theta, t):
return t
def rhophi_theta_tau(lib, rho, phi, theta, tau):
return lib.sqrt(t2.rhophi_theta_tau(lib, rho, phi, theta, tau))
def rhophi_eta_t(lib, rho, phi, eta, t):
return t
def rhophi_eta_tau(lib, rho, phi, eta, tau):
return lib.sqrt(t2.rhophi_eta_tau(lib, rho, phi, eta, tau))
dispatch_map = {
(AzimuthalXY, LongitudinalZ, TemporalT): (xy_z_t, float),
(AzimuthalXY, LongitudinalZ, TemporalTau): (xy_z_tau, float),
(AzimuthalXY, LongitudinalTheta, TemporalT): (xy_theta_t, float),
(AzimuthalXY, LongitudinalTheta, TemporalTau): (xy_theta_tau, float),
(AzimuthalXY, LongitudinalEta, TemporalT): (xy_eta_t, float),
(AzimuthalXY, LongitudinalEta, TemporalTau): (xy_eta_tau, float),
(AzimuthalRhoPhi, LongitudinalZ, TemporalT): (rhophi_z_t, float),
(AzimuthalRhoPhi, LongitudinalZ, TemporalTau): (rhophi_z_tau, float),
(AzimuthalRhoPhi, LongitudinalTheta, TemporalT): (rhophi_theta_t, float),
(AzimuthalRhoPhi, LongitudinalTheta, TemporalTau): (rhophi_theta_tau, float),
(AzimuthalRhoPhi, LongitudinalEta, TemporalT): (rhophi_eta_t, float),
(AzimuthalRhoPhi, LongitudinalEta, TemporalTau): (rhophi_eta_tau, float),
}
def dispatch(v: typing.Any) -> typing.Any:
function, *returns = _from_signature(
__name__,
dispatch_map,
(
_aztype(v),
_ltype(v),
_ttype(v),
),
)
with numpy.errstate(all="ignore"):
return v._wrap_result(
_flavor_of(v),
function(
v.lib,
*v.azimuthal.elements,
*v.longitudinal.elements,
*v.temporal.elements
),
returns,
1,
)
| [
"vector._methods._ttype",
"vector._methods._aztype",
"vector._compute.lorentz.t2.xy_z_tau",
"vector._compute.lorentz.t2.xy_theta_tau",
"vector._compute.lorentz.t2.xy_eta_tau",
"numpy.errstate",
"vector._compute.lorentz.t2.rhophi_z_tau",
"vector._compute.lorentz.t2.rhophi_theta_tau",
"vector._methods... | [((670, 700), 'vector._compute.lorentz.t2.xy_z_tau', 't2.xy_z_tau', (['lib', 'x', 'y', 'z', 'tau'], {}), '(lib, x, y, z, tau)\n', (681, 700), False, 'from vector._compute.lorentz import t2\n'), ((817, 855), 'vector._compute.lorentz.t2.xy_theta_tau', 't2.xy_theta_tau', (['lib', 'x', 'y', 'theta', 'tau'], {}), '(lib, x, y, theta, tau)\n', (832, 855), False, 'from vector._compute.lorentz import t2\n'), ((964, 998), 'vector._compute.lorentz.t2.xy_eta_tau', 't2.xy_eta_tau', (['lib', 'x', 'y', 'eta', 'tau'], {}), '(lib, x, y, eta, tau)\n', (977, 998), False, 'from vector._compute.lorentz import t2\n'), ((1115, 1153), 'vector._compute.lorentz.t2.rhophi_z_tau', 't2.rhophi_z_tau', (['lib', 'rho', 'phi', 'z', 'tau'], {}), '(lib, rho, phi, z, tau)\n', (1130, 1153), False, 'from vector._compute.lorentz import t2\n'), ((1286, 1332), 'vector._compute.lorentz.t2.rhophi_theta_tau', 't2.rhophi_theta_tau', (['lib', 'rho', 'phi', 'theta', 'tau'], {}), '(lib, rho, phi, theta, tau)\n', (1305, 1332), False, 'from vector._compute.lorentz import t2\n'), ((1457, 1499), 'vector._compute.lorentz.t2.rhophi_eta_tau', 't2.rhophi_eta_tau', (['lib', 'rho', 'phi', 'eta', 'tau'], {}), '(lib, rho, phi, eta, tau)\n', (1474, 1499), False, 'from vector._compute.lorentz import t2\n'), ((2619, 2647), 'numpy.errstate', 'numpy.errstate', ([], {'all': '"""ignore"""'}), "(all='ignore')\n", (2633, 2647), False, 'import numpy\n'), ((2535, 2545), 'vector._methods._aztype', '_aztype', (['v'], {}), '(v)\n', (2542, 2545), False, 'from vector._methods import AzimuthalRhoPhi, AzimuthalXY, LongitudinalEta, LongitudinalTheta, LongitudinalZ, TemporalT, TemporalTau, _aztype, _flavor_of, _from_signature, _ltype, _ttype\n'), ((2559, 2568), 'vector._methods._ltype', '_ltype', (['v'], {}), '(v)\n', (2565, 2568), False, 'from vector._methods import AzimuthalRhoPhi, AzimuthalXY, LongitudinalEta, LongitudinalTheta, LongitudinalZ, TemporalT, TemporalTau, _aztype, _flavor_of, _from_signature, _ltype, 
_ttype\n'), ((2582, 2591), 'vector._methods._ttype', '_ttype', (['v'], {}), '(v)\n', (2588, 2591), False, 'from vector._methods import AzimuthalRhoPhi, AzimuthalXY, LongitudinalEta, LongitudinalTheta, LongitudinalZ, TemporalT, TemporalTau, _aztype, _flavor_of, _from_signature, _ltype, _ttype\n'), ((2692, 2705), 'vector._methods._flavor_of', '_flavor_of', (['v'], {}), '(v)\n', (2702, 2705), False, 'from vector._methods import AzimuthalRhoPhi, AzimuthalXY, LongitudinalEta, LongitudinalTheta, LongitudinalZ, TemporalT, TemporalTau, _aztype, _flavor_of, _from_signature, _ltype, _ttype\n')] |
#
# Copyright (C) 2014-2016 UAVCAN Development Team <dronecan.org>
#
# This software is distributed under the terms of the MIT License.
#
# Author: <NAME> <<EMAIL>>
# <NAME> <<EMAIL>>
#
from __future__ import division, absolute_import, print_function, unicode_literals
import decimal
class SourceTimeResolver:
"""
This class contains logic that recovers absolute value of a remote clock observable via small overflowing
integer samples.
For example, consider a remote system that reports timestamps as a 16-bit integer number of milliseconds that
overflows every 60 seconds (this method is used in SLCAN for example). This class can recover the time difference
in the remote clock domain between two arbitrary timestamps, even if the timestamp variable overflowed more than
once between these events.
"""
def __init__(self, source_clock_overflow_period=None):
"""
Args:
source_clock_overflow_period: Overflow period of the remote clock, in seconds.
If not provided, the remote clock is considered to never
overflow (i.e. absolute).
"""
self.source_clock_overflow_period = \
decimal.Decimal(source_clock_overflow_period) if source_clock_overflow_period else None
if self.source_clock_overflow_period is not None and self.source_clock_overflow_period <= 0:
raise ValueError('source_clock_overflow_period must be positive or None')
# Internal states
self._resolved_time = None
self._prev_source_sample = None
self._prev_target_sample = None
def reset(self):
"""
Resets the internal logic; resolved time will start over.
"""
self._resolved_time = None
self._prev_source_sample = None
self._prev_target_sample = None
def update(self, source_clock_sample, target_clock_sample):
"""
Args:
source_clock_sample: Sample of the source clock, in seconds
target_clock_sample: Sample of the target clock, in seconds
Returns: Resolved absolute source clock value
"""
if self._resolved_time is None or self.source_clock_overflow_period is None:
self._resolved_time = decimal.Decimal(source_clock_sample)
self._prev_source_sample = source_clock_sample
self._prev_target_sample = target_clock_sample
else:
# Time between updates in the target clock domain
tgt_delta = target_clock_sample - self._prev_target_sample
self._prev_target_sample = target_clock_sample
assert tgt_delta >= 0
# Time between updates in the source clock domain
src_delta = source_clock_sample - self._prev_source_sample
self._prev_source_sample = source_clock_sample
# Using the target clock we can resolve the integer ambiguity (number of overflows)
full_cycles = int(round((tgt_delta - src_delta) / float(self.source_clock_overflow_period), 0))
# Updating the source clock now; in two steps, in order to avoid error accumulation in floats
self._resolved_time += decimal.Decimal(full_cycles * self.source_clock_overflow_period)
self._resolved_time += decimal.Decimal(src_delta)
return self._resolved_time
class TimestampEstimator:
"""
Based on "A Passive Solution to the Sensor Synchronization Problem" [<NAME> 2010]
https://april.eecs.umich.edu/pdfs/olson2010.pdf
"""
DEFAULT_MAX_DRIFT_PPM = 200
DEFAULT_MAX_PHASE_ERROR_TO_RESYNC = 1.
def __init__(self,
max_rate_error=None,
source_clock_overflow_period=None,
fixed_delay=None,
max_phase_error_to_resync=None):
"""
Args:
max_rate_error: The max drift parameter must be not lower than maximum relative clock
drift in PPM. If the max relative drift is guaranteed to be lower,
reducing this value will improve estimation. The default covers vast
majority of low-cost (and up) crystal oscillators.
source_clock_overflow_period: How often the source clocks wraps over, in seconds.
For example, for SLCAN this value is 60 seconds.
If not provided, the source clock is considered to never wrap over.
fixed_delay: This value will be unconditionally added to the delay estimations.
Represented in seconds. Default is zero.
For USB-interfaced sources it should be safe to use as much as 100 usec.
max_phase_error_to_resync: When this value is exceeded, the estimator will start over.
Defaults to a large value.
"""
self.max_rate_error = float(max_rate_error or (self.DEFAULT_MAX_DRIFT_PPM / 1e6))
self.fixed_delay = fixed_delay or 0
self.max_phase_error_to_resync = max_phase_error_to_resync or self.DEFAULT_MAX_PHASE_ERROR_TO_RESYNC
if self.max_rate_error < 0:
raise ValueError('max_rate_error must be non-negative')
if self.fixed_delay < 0:
raise ValueError('fixed_delay must be non-negative')
if self.max_phase_error_to_resync <= 0:
raise ValueError('max_phase_error_to_resync must be positive')
# This is used to recover absolute source time
self._source_time_resolver = SourceTimeResolver(source_clock_overflow_period=source_clock_overflow_period)
# Refer to the paper for explanations
self._p = None
self._q = None
# Statistics
self._estimated_delay = 0.0
self._resync_count = 0
def update(self, source_clock_sample, target_clock_sample):
"""
Args:
source_clock_sample: E.g. value received from the source system, in seconds
target_clock_sample: E.g. target time sampled when the data arrived to the local system, in seconds
Returns: Event timestamp converted to the target time domain.
"""
pi = float(self._source_time_resolver.update(source_clock_sample, target_clock_sample))
qi = target_clock_sample
# Initialization
if self._p is None:
self._p = pi
self._q = qi
# Sync error - refer to the reference implementation of the algorithm
self._estimated_delay = abs((pi - self._p) - (qi - self._q))
# Resynchronization (discarding known state)
if self._estimated_delay > self.max_phase_error_to_resync:
self._source_time_resolver.reset()
self._resync_count += 1
self._p = pi = float(self._source_time_resolver.update(source_clock_sample, target_clock_sample))
self._q = qi
# Offset options
assert pi >= self._p
offset = self._p - self._q - self.max_rate_error * (pi - self._p) - self.fixed_delay
new_offset = pi - qi - self.fixed_delay
# Updating p/q if the new offset is lower by magnitude
if new_offset >= offset:
offset = new_offset
self._p = pi
self._q = qi
ti = pi - offset
return ti
@property
def estimated_delay(self):
"""Estimated delay, updated in the last call to update()"""
return self._estimated_delay
@property
def resync_count(self):
return self._resync_count
if __name__ == '__main__':
# noinspection PyPackageRequirements
import matplotlib.pyplot as plt
# noinspection PyPackageRequirements
import numpy
import time
if 1:
estimator = TimestampEstimator()
print(estimator.update(.0, 1000.0))
print(estimator.update(.1, 1000.1))
print(estimator.update(.2, 1000.1)) # Repeat
print(estimator.update(.3, 1000.1)) # Repeat
print(estimator.update(.4, 1000.2))
print(estimator.update(.5, 1000.3))
if 1:
# Conversion from Real to Monotonic
estimator = TimestampEstimator(max_rate_error=1e-5,
fixed_delay=1e-6,
max_phase_error_to_resync=1e-2)
print('Initial mono to real:', time.time() - time.monotonic())
while True:
mono = time.monotonic()
real = time.time()
est_real = estimator.update(mono, real)
mono_to_real_offset = est_real - mono
print(mono_to_real_offset)
time.sleep(1)
max_rate_error = None
source_clock_range = 10
delay_min = 0.0001
delay_max = 0.02
num_samples = 200
x = range(num_samples)
delays = numpy.random.uniform(delay_min, delay_max, size=num_samples)
estimator = TimestampEstimator(max_rate_error=max_rate_error, fixed_delay=delay_min,
source_clock_overflow_period=source_clock_range)
source_clocks = []
estimated_times = []
offset_errors = []
estimated_delays = []
for i, delay in enumerate(delays):
source_clock = i
source_clocks.append(source_clock)
target_clock = i + delay
estimated_time = estimator.update(source_clock % source_clock_range, target_clock)
estimated_times.append(estimated_time)
offset_errors.append(estimated_time - source_clock)
estimated_delays.append(estimator.estimated_delay)
fig = plt.figure()
ax1 = fig.add_subplot(211)
ax1.plot(x, numpy.array(delays) * 1e3)
ax1.plot(x, numpy.array(offset_errors) * 1e3)
ax2 = fig.add_subplot(212)
ax2.plot(x, (numpy.array(estimated_times) - numpy.array(source_clocks)) * 1e3)
plt.show()
| [
"time.monotonic",
"time.sleep",
"numpy.array",
"matplotlib.pyplot.figure",
"numpy.random.uniform",
"time.time",
"decimal.Decimal",
"matplotlib.pyplot.show"
] | [((9066, 9126), 'numpy.random.uniform', 'numpy.random.uniform', (['delay_min', 'delay_max'], {'size': 'num_samples'}), '(delay_min, delay_max, size=num_samples)\n', (9086, 9126), False, 'import numpy\n'), ((9808, 9820), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (9818, 9820), True, 'import matplotlib.pyplot as plt\n'), ((10066, 10076), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (10074, 10076), True, 'import matplotlib.pyplot as plt\n'), ((1264, 1309), 'decimal.Decimal', 'decimal.Decimal', (['source_clock_overflow_period'], {}), '(source_clock_overflow_period)\n', (1279, 1309), False, 'import decimal\n'), ((2330, 2366), 'decimal.Decimal', 'decimal.Decimal', (['source_clock_sample'], {}), '(source_clock_sample)\n', (2345, 2366), False, 'import decimal\n'), ((3265, 3329), 'decimal.Decimal', 'decimal.Decimal', (['(full_cycles * self.source_clock_overflow_period)'], {}), '(full_cycles * self.source_clock_overflow_period)\n', (3280, 3329), False, 'import decimal\n'), ((3365, 3391), 'decimal.Decimal', 'decimal.Decimal', (['src_delta'], {}), '(src_delta)\n', (3380, 3391), False, 'import decimal\n'), ((8689, 8705), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8703, 8705), False, 'import time\n'), ((8725, 8736), 'time.time', 'time.time', ([], {}), '()\n', (8734, 8736), False, 'import time\n'), ((8890, 8903), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (8900, 8903), False, 'import time\n'), ((9869, 9888), 'numpy.array', 'numpy.array', (['delays'], {}), '(delays)\n', (9880, 9888), False, 'import numpy\n'), ((9912, 9938), 'numpy.array', 'numpy.array', (['offset_errors'], {}), '(offset_errors)\n', (9923, 9938), False, 'import numpy\n'), ((8618, 8629), 'time.time', 'time.time', ([], {}), '()\n', (8627, 8629), False, 'import time\n'), ((8632, 8648), 'time.monotonic', 'time.monotonic', ([], {}), '()\n', (8646, 8648), False, 'import time\n'), ((9995, 10023), 'numpy.array', 'numpy.array', (['estimated_times'], {}), 
'(estimated_times)\n', (10006, 10023), False, 'import numpy\n'), ((10026, 10052), 'numpy.array', 'numpy.array', (['source_clocks'], {}), '(source_clocks)\n', (10037, 10052), False, 'import numpy\n')] |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import warnings
from distutils.version import LooseVersion
from .pycompat import OrderedDict, zip, dask_array_type
from .common import full_like
from .combine import concat
from .ops import (inject_bottleneck_rolling_methods,
inject_datasetrolling_methods, has_bottleneck, bn)
from .dask_array_ops import dask_rolling_wrapper
class Rolling(object):
"""A object that implements the moving window pattern.
See Also
--------
Dataset.groupby
DataArray.groupby
Dataset.rolling
DataArray.rolling
"""
_attributes = ['window', 'min_periods', 'center', 'dim']
def __init__(self, obj, min_periods=None, center=False, **windows):
"""
Moving window object.
Parameters
----------
obj : Dataset or DataArray
Object to window.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
rolling : type of input argument
"""
if (has_bottleneck and
(LooseVersion(bn.__version__) < LooseVersion('1.0'))):
warnings.warn('xarray requires bottleneck version of 1.0 or '
'greater for rolling operations. Rolling '
'aggregation methods will use numpy instead'
'of bottleneck.')
if len(windows) != 1:
raise ValueError('exactly one dim/window should be provided')
dim, window = next(iter(windows.items()))
if window <= 0:
raise ValueError('window must be > 0')
self.obj = obj
# attributes
self.window = window
self.min_periods = min_periods
if min_periods is None:
self._min_periods = window
else:
if min_periods <= 0:
raise ValueError(
'min_periods must be greater than zero or None')
self._min_periods = min_periods
self.center = center
self.dim = dim
def __repr__(self):
"""provide a nice str repr of our rolling object"""
attrs = ["{k}->{v}".format(k=k, v=getattr(self, k))
for k in self._attributes if getattr(self, k, None) is not None]
return "{klass} [{attrs}]".format(klass=self.__class__.__name__,
attrs=','.join(attrs))
def __len__(self):
return self.obj.sizes[self.dim]
class DataArrayRolling(Rolling):
"""
This class adds the following class methods;
+ _reduce_method(cls, func)
+ _bottleneck_reduce(cls, func)
These class methods will be used to inject numpy or bottleneck function
by doing
>>> func = cls._reduce_method(f)
>>> func.__name__ = name
>>> setattr(cls, name, func)
in ops.inject_bottleneck_rolling_methods.
After the injection, the Rolling object will have `name` (such as `mean` or
`median`) methods,
e.g. it enables the following call,
>>> data.rolling().mean()
If bottleneck is installed, some bottleneck methods will be used instdad of
the numpy method.
see also
+ rolling.DataArrayRolling
+ ops.inject_bottleneck_rolling_methods
"""
def __init__(self, obj, min_periods=None, center=False, **windows):
super(DataArrayRolling, self).__init__(obj, min_periods=min_periods,
center=center, **windows)
self._windows = None
self._valid_windows = None
self.window_indices = None
self.window_labels = None
self._setup_windows()
@property
def windows(self):
if self._windows is None:
self._windows = OrderedDict(zip(self.window_labels,
self.window_indices))
return self._windows
def __iter__(self):
for (label, indices, valid) in zip(self.window_labels,
self.window_indices,
self._valid_windows):
window = self.obj.isel(**{self.dim: indices})
if not valid:
window = full_like(window, fill_value=True, dtype=bool)
yield (label, window)
def _setup_windows(self):
"""
Find the indices and labels for each window
"""
from .dataarray import DataArray
self.window_labels = self.obj[self.dim]
window = int(self.window)
dim_size = self.obj[self.dim].size
stops = np.arange(dim_size) + 1
starts = np.maximum(stops - window, 0)
if self._min_periods > 1:
valid_windows = (stops - starts) >= self._min_periods
else:
# No invalid windows
valid_windows = np.ones(dim_size, dtype=bool)
self._valid_windows = DataArray(valid_windows, dims=(self.dim, ),
coords=self.obj[self.dim].coords)
self.window_indices = [slice(start, stop)
for start, stop in zip(starts, stops)]
def _center_result(self, result):
"""center result"""
shift = (-self.window // 2) + 1
return result.shift(**{self.dim: shift})
def reduce(self, func, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over an the rolling dimension.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
"""
windows = [window.reduce(func, dim=self.dim, **kwargs)
for _, window in self]
# Find valid windows based on count
if self.dim in self.obj.coords:
concat_dim = self.window_labels
else:
concat_dim = self.dim
counts = concat([window.count(dim=self.dim) for _, window in self],
dim=concat_dim)
result = concat(windows, dim=concat_dim)
# restore dim order
result = result.transpose(*self.obj.dims)
result = result.where(counts >= self._min_periods)
if self.center:
result = self._center_result(result)
return result
@classmethod
def _reduce_method(cls, func):
"""
Methods to return a wrapped function for any function `func` for
numpy methods.
"""
def wrapped_func(self, **kwargs):
return self.reduce(func, **kwargs)
return wrapped_func
@classmethod
def _bottleneck_reduce(cls, func):
"""
Methods to return a wrapped function for any function `func` for
bottoleneck method, except for `median`.
"""
def wrapped_func(self, **kwargs):
from .dataarray import DataArray
# bottleneck doesn't allow min_count to be 0, although it should
# work the same as if min_count = 1
if self.min_periods is not None and self.min_periods == 0:
min_count = 1
else:
min_count = self.min_periods
axis = self.obj.get_axis_num(self.dim)
if isinstance(self.obj.data, dask_array_type):
values = dask_rolling_wrapper(func, self.obj.data,
window=self.window,
min_count=min_count,
axis=axis)
else:
values = func(self.obj.data, window=self.window,
min_count=min_count, axis=axis)
result = DataArray(values, self.obj.coords)
if self.center:
result = self._center_result(result)
return result
return wrapped_func
class DatasetRolling(Rolling):
"""An object that implements the moving window pattern for Dataset.
This class has an OrderedDict named self.rollings, that is a collection of
DataArrayRollings for all the DataArrays in the Dataset, except for those
not depending on rolling dimension.
reduce() method returns a new Dataset generated from a set of
self.rollings[key].reduce().
See Also
--------
Dataset.groupby
DataArray.groupby
Dataset.rolling
DataArray.rolling
"""
def __init__(self, obj, min_periods=None, center=False, **windows):
"""
Moving window object for Dataset.
Parameters
----------
obj : Dataset
Object to window.
min_periods : int, default None
Minimum number of observations in window required to have a value
(otherwise result is NA). The default, None, is equivalent to
setting min_periods equal to the size of the window.
center : boolean, default False
Set the labels at the center of the window.
**windows : dim=window
dim : str
Name of the dimension to create the rolling iterator
along (e.g., `time`).
window : int
Size of the moving window.
Returns
-------
rolling : type of input argument
"""
super(DatasetRolling, self).__init__(obj,
min_periods, center, **windows)
if self.dim not in self.obj.dims:
raise KeyError(self.dim)
# Keep each Rolling object as an OrderedDict
self.rollings = OrderedDict()
for key, da in self.obj.data_vars.items():
# keeps rollings only for the dataset depending on slf.dim
if self.dim in da.dims:
self.rollings[key] = DataArrayRolling(da, min_periods,
center, **windows)
def reduce(self, func, **kwargs):
"""Reduce the items in this group by applying `func` along some
dimension(s).
Parameters
----------
func : function
Function which can be called in the form
`func(x, **kwargs)` to return the result of collapsing an
np.ndarray over an the rolling dimension.
**kwargs : dict
Additional keyword arguments passed on to `func`.
Returns
-------
reduced : DataArray
Array with summarized data.
"""
from .dataset import Dataset
reduced = OrderedDict()
for key, da in self.obj.data_vars.items():
if self.dim in da.dims:
reduced[key] = self.rollings[key].reduce(func, **kwargs)
else:
reduced[key] = self.obj[key]
return Dataset(reduced, coords=self.obj.coords)
@classmethod
def _reduce_method(cls, func):
"""
Return a wrapped function for injecting numpy and bottoleneck methods.
see ops.inject_datasetrolling_methods
"""
def wrapped_func(self, **kwargs):
from .dataset import Dataset
reduced = OrderedDict()
for key, da in self.obj.data_vars.items():
if self.dim in da.dims:
reduced[key] = getattr(self.rollings[key],
func.__name__)(**kwargs)
else:
reduced[key] = self.obj[key]
return Dataset(reduced, coords=self.obj.coords)
return wrapped_func
inject_bottleneck_rolling_methods(DataArrayRolling)
inject_datasetrolling_methods(DatasetRolling)
| [
"numpy.ones",
"warnings.warn",
"distutils.version.LooseVersion",
"numpy.maximum",
"numpy.arange"
] | [((5211, 5240), 'numpy.maximum', 'np.maximum', (['(stops - window)', '(0)'], {}), '(stops - window, 0)\n', (5221, 5240), True, 'import numpy as np\n'), ((1739, 1907), 'warnings.warn', 'warnings.warn', (['"""xarray requires bottleneck version of 1.0 or greater for rolling operations. Rolling aggregation methods will use numpy insteadof bottleneck."""'], {}), "(\n 'xarray requires bottleneck version of 1.0 or greater for rolling operations. Rolling aggregation methods will use numpy insteadof bottleneck.'\n )\n", (1752, 1907), False, 'import warnings\n'), ((5170, 5189), 'numpy.arange', 'np.arange', (['dim_size'], {}), '(dim_size)\n', (5179, 5189), True, 'import numpy as np\n'), ((5417, 5446), 'numpy.ones', 'np.ones', (['dim_size'], {'dtype': 'bool'}), '(dim_size, dtype=bool)\n', (5424, 5446), True, 'import numpy as np\n'), ((1673, 1701), 'distutils.version.LooseVersion', 'LooseVersion', (['bn.__version__'], {}), '(bn.__version__)\n', (1685, 1701), False, 'from distutils.version import LooseVersion\n'), ((1704, 1723), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.0"""'], {}), "('1.0')\n", (1716, 1723), False, 'from distutils.version import LooseVersion\n')] |
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import os
import numpy as np
def TestReduction(op, data, axes, keepdims):
if op == "ReduceL1":
return np.sum(a=np.abs(data), axis=axes, keepdims=keepdims)
elif op == "ReduceL2":
return np.sqrt(np.sum(a=np.square(data), axis=axes, keepdims=keepdims))
elif op == "ReduceLogSum":
return np.log(np.sum(data, axis=axes, keepdims=keepdims))
elif op == "ReduceLogSumExp":
return np.log(np.sum(np.exp(data), axis=axes, keepdims=keepdims))
elif op == "ReduceMax":
return np.max(data, axis=axes, keepdims=keepdims)
elif op == "ReduceMean":
return np.mean(data, axis=axes, keepdims=keepdims)
elif op == "ReduceMin":
return np.min(data, axis=axes, keepdims=keepdims)
elif op == "ReduceProd":
return np.prod(data, axis=axes, keepdims=keepdims)
elif op == "ReduceSum":
return np.sum(data, axis=axes, keepdims=keepdims)
elif op == "ReduceSumSquare":
return np.sum(np.square(data), axis=axes, keepdims=keepdims)
elif op == "ArgMax":
axis = axes[0] if axes else 0
res = np.argmax(data, axis)
if keepdims:
res = np.expand_dims(res, axis)
return res
elif op == "ArgMin":
axis = axes[0] if axes else 0
res = np.argmin(data, axis)
if keepdims:
res = np.expand_dims(res, axis)
return res
def PrintResult(op, axes, keepdims, res):
print(" {\"%s\"," % op)
print("OpAttributesResult(")
print(" // ReductionAttribute")
print(" {")
print (" // axes_")
print ("{", end='')
print(*axes, sep=", ", end='') if axes else print("")
print ("},")
print (" // keep_dims_")
print (keepdims, ",")
print ("},")
print (" // expected dims")
print ("{", end='')
print(*res.shape, sep=", ", end='')
print ("},")
print (" // expected values")
print ("{", end='')
for i in range(0, res.size):
print("%5.6ff," % res.item(i))
print ("})},")
def PrintDisableOptimizations():
print ("// Optimizations are disabled in this file to improve build throughput")
print ("#if defined(_MSC_VER) || defined(__INTEL_COMPILER)")
print ("#pragma optimize (\"\", off)")
print ("#elif defined(__GNUC__)")
print ("#if defined(__clang__)")
print ("\t#pragma clang optimize off")
print ("#else")
print ("\t#pragma GCC push_options")
print ("\t#pragma GCC optimize (\"O0\")")
print ("#endif")
print ("#endif")
def PrintReenableOptimizations():
print ("#if defined(_MSC_VER) || defined(__INTEL_COMPILER)")
print ("t#pragma optimize (\"\", on)")
print ("#elif defined(__GNUC__)")
print ("#if defined(__clang__)")
print ("\t#pragma clang optimize on")
print ("#else")
print ("\t#pragma GCC pop_options")
print ("#endif")
print ("#endif")
if __name__ == "__main__":
from itertools import product
input_shape = [2,3,2,2,3]
np.random.seed(0)
input_data = np.random.uniform(size=input_shape)
axes_options = [(2,3), (2, 1, 4), (0, 2, 3), (0,), (2,), (4,), None]
keepdims_options = [0, 1]
ops = ["ReduceL1", "ReduceL2", "ReduceLogSum", "ReduceLogSumExp", "ReduceMax", "ReduceMean",
"ReduceMin", "ReduceProd", "ReduceSum", "ReduceSumSquare", "ArgMax", "ArgMin"]
print ("// Please don't manually edit this file. Generated from reduction_test_cases_generator.py")
PrintDisableOptimizations()
print ("ReductionTestCases testcases = {")
print ("// input_data")
print ("{")
for i in range(0, input_data.size):
print("%5.6ff," % input_data.item(i),)
print ("},")
print ("// input_dims")
print ("{", end='')
print(*input_shape, sep=", ", end='')
print ("},")
print(" // map_op_attribute_expected")
print ("{")
for config in product(axes_options, keepdims_options, ops):
axes, keepdims, op = config
#ArgMax and ArgMin only take single axis (default 0)
skip = False;
if op == "ArgMax" or op == "ArgMin":
skip = axes is not None and len(axes) > 1
if not skip:
res = TestReduction(op, input_data, axes, keepdims)
PrintResult(op, axes, keepdims, res)
print ("}")
print ("};")
PrintReenableOptimizations()
| [
"numpy.abs",
"numpy.mean",
"numpy.prod",
"itertools.product",
"numpy.min",
"numpy.argmax",
"numpy.square",
"numpy.max",
"numpy.sum",
"numpy.exp",
"numpy.random.seed",
"numpy.expand_dims",
"numpy.random.uniform",
"numpy.argmin"
] | [((3060, 3077), 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), '(0)\n', (3074, 3077), True, 'import numpy as np\n'), ((3095, 3130), 'numpy.random.uniform', 'np.random.uniform', ([], {'size': 'input_shape'}), '(size=input_shape)\n', (3112, 3130), True, 'import numpy as np\n'), ((3944, 3988), 'itertools.product', 'product', (['axes_options', 'keepdims_options', 'ops'], {}), '(axes_options, keepdims_options, ops)\n', (3951, 3988), False, 'from itertools import product\n'), ((219, 231), 'numpy.abs', 'np.abs', (['data'], {}), '(data)\n', (225, 231), True, 'import numpy as np\n'), ((423, 465), 'numpy.sum', 'np.sum', (['data'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(data, axis=axes, keepdims=keepdims)\n', (429, 465), True, 'import numpy as np\n'), ((322, 337), 'numpy.square', 'np.square', (['data'], {}), '(data)\n', (331, 337), True, 'import numpy as np\n'), ((618, 660), 'numpy.max', 'np.max', (['data'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(data, axis=axes, keepdims=keepdims)\n', (624, 660), True, 'import numpy as np\n'), ((530, 542), 'numpy.exp', 'np.exp', (['data'], {}), '(data)\n', (536, 542), True, 'import numpy as np\n'), ((705, 748), 'numpy.mean', 'np.mean', (['data'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(data, axis=axes, keepdims=keepdims)\n', (712, 748), True, 'import numpy as np\n'), ((792, 834), 'numpy.min', 'np.min', (['data'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(data, axis=axes, keepdims=keepdims)\n', (798, 834), True, 'import numpy as np\n'), ((879, 922), 'numpy.prod', 'np.prod', (['data'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(data, axis=axes, keepdims=keepdims)\n', (886, 922), True, 'import numpy as np\n'), ((966, 1008), 'numpy.sum', 'np.sum', (['data'], {'axis': 'axes', 'keepdims': 'keepdims'}), '(data, axis=axes, keepdims=keepdims)\n', (972, 1008), True, 'import numpy as np\n'), ((1065, 1080), 'numpy.square', 'np.square', (['data'], {}), '(data)\n', (1074, 1080), True, 'import numpy as np\n'), ((1189, 
1210), 'numpy.argmax', 'np.argmax', (['data', 'axis'], {}), '(data, axis)\n', (1198, 1210), True, 'import numpy as np\n'), ((1250, 1275), 'numpy.expand_dims', 'np.expand_dims', (['res', 'axis'], {}), '(res, axis)\n', (1264, 1275), True, 'import numpy as np\n'), ((1372, 1393), 'numpy.argmin', 'np.argmin', (['data', 'axis'], {}), '(data, axis)\n', (1381, 1393), True, 'import numpy as np\n'), ((1433, 1458), 'numpy.expand_dims', 'np.expand_dims', (['res', 'axis'], {}), '(res, axis)\n', (1447, 1458), True, 'import numpy as np\n')] |
import numpy as np
from starfish import ImageStack
from starfish.core.image.Filter.zero_by_channel_magnitude import ZeroByChannelMagnitude
def create_imagestack_with_magnitude_scale():
    """Build a two-channel ImageStack whose pixel values increase linearly
    from 0 to 1 along the y axis (identical in both channels)."""
    ramp = np.linspace(0, 1, 11, dtype=np.float32)
    stacked = np.repeat(ramp[None, :], 2, axis=0)
    # reshape data into a 2-channel, (1, 11, 1) image in (x, y, z)
    stacked = stacked.reshape(1, 2, 1, 11, 1)
    return ImageStack.from_numpy(stacked)
def test_zero_by_channel_magnitude_produces_accurate_results():
    """With thresh=inf every channel magnitude is below threshold, so the
    filter must zero out the entire stack."""
    stack = create_imagestack_with_magnitude_scale()
    zero_filter = ZeroByChannelMagnitude(thresh=np.inf, normalize=False)
    result = zero_filter.run(stack, in_place=False, n_processes=1)
    assert np.all(result.xarray == 0)
| [
"starfish.ImageStack.from_numpy",
"numpy.repeat",
"starfish.core.image.Filter.zero_by_channel_magnitude.ZeroByChannelMagnitude",
"numpy.linspace",
"numpy.all"
] | [((256, 295), 'numpy.linspace', 'np.linspace', (['(0)', '(1)', '(11)'], {'dtype': 'np.float32'}), '(0, 1, 11, dtype=np.float32)\n', (267, 295), True, 'import numpy as np\n'), ((307, 342), 'numpy.repeat', 'np.repeat', (['data[None, :]', '(2)'], {'axis': '(0)'}), '(data[None, :], 2, axis=0)\n', (316, 342), True, 'import numpy as np\n'), ((467, 494), 'starfish.ImageStack.from_numpy', 'ImageStack.from_numpy', (['data'], {}), '(data)\n', (488, 494), False, 'from starfish import ImageStack\n'), ((652, 706), 'starfish.core.image.Filter.zero_by_channel_magnitude.ZeroByChannelMagnitude', 'ZeroByChannelMagnitude', ([], {'thresh': 'np.inf', 'normalize': '(False)'}), '(thresh=np.inf, normalize=False)\n', (674, 706), False, 'from starfish.core.image.Filter.zero_by_channel_magnitude import ZeroByChannelMagnitude\n'), ((784, 812), 'numpy.all', 'np.all', (['(filtered.xarray == 0)'], {}), '(filtered.xarray == 0)\n', (790, 812), True, 'import numpy as np\n')] |
import numpy as np
def generate_features(implementation_version, draw_graphs, raw_data, axes, sampling_freq, scale_axes):
    """DSP block: scale every sample of each axis by `scale_axes`.

    Parameters
    ----------
    implementation_version : int
        Version of the DSP implementation (part of the interface; unused here).
    draw_graphs : bool
        Whether graphs were requested (this block never produces any).
    raw_data : np.ndarray
        1D array with the samples of all axes interleaved.
    axes : list
        Axis names; their count determines how `raw_data` is de-interleaved.
    sampling_freq : float
        Sampling frequency (unused by this simple scaling block).
    scale_axes : float
        Multiplier applied to every sample.

    Returns
    -------
    dict with the flat `features` list (grouped per axis), empty `graphs`,
    empty `fft_used`, and the flat output shape configuration.
    """
    # features is a 1D array, reshape so we have a matrix (samples x axes)
    raw_data = raw_data.reshape(int(len(raw_data) / len(axes)), len(axes))
    features = []
    graphs = []
    # process one axis at a time so the output stays grouped per axis
    for ax in range(len(axes)):
        # vectorized: convert the axis column to float and scale in one step
        # (replaces the original per-element append loops)
        fx = raw_data[:, ax].astype(float) * scale_axes
        features.extend(fx.tolist())
    return {
        'features': features,
        'graphs': graphs,
        # if you use FFTs then set the used FFTs here (this helps with memory optimization on MCUs)
        'fft_used': [],
        'output_config': {
            # type can be 'flat', 'image' or 'spectrogram'
            'type': 'flat',
            'shape': {
                # shape should be { width, height, channels } for image, { width, height } for spectrogram
                'width': len(features)
            }
        }
    }
| [
"numpy.array"
] | [((535, 546), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (543, 546), True, 'import numpy as np\n')] |
import parasail
from .._util._multiprocessing import EnhancedPool as Pool
import itertools
from anndata import AnnData
from typing import Union, Collection, List, Tuple, Dict, Callable
from .._compat import Literal
import numpy as np
from scanpy import logging
import numpy.testing as npt
from .._util import _is_na, _is_symmetric, _reduce_nonzero
import abc
from Levenshtein import distance as levenshtein_dist
import scipy.spatial
import scipy.sparse
from scipy.sparse import coo_matrix, csr_matrix, lil_matrix
from functools import reduce
from collections import Counter
class _DistanceCalculator(abc.ABC):
DTYPE = "uint8"
def __init__(self, cutoff: float, n_jobs: Union[int, None] = None):
"""
Parameters
----------
cutoff:
Will eleminate distances > cutoff to make efficient
use of sparse matrices.
n_jobs
Number of jobs to use for the pairwise distance calculation.
If None, use all jobs.
"""
if cutoff > 255:
raise ValueError(
"Using a cutoff > 255 is not possible due to the `uint8` dtype used"
)
self.cutoff = cutoff
self.n_jobs = n_jobs
@abc.abstractmethod
def calc_dist_mat(self, seqs: np.ndarray) -> coo_matrix:
"""Calculate the upper diagnoal, pairwise distance matrix of all
sequences in `seq`.
* Only returns distances <= cutoff
* Distances are non-negative values.
* The resulting matrix is offsetted by 1 to allow efficient use
of sparse matrices ($d' = d+1$).
I.e. 0 -> d > cutoff; 1 -> d == 0; 2 -> d == 1; ...
"""
pass
class _IdentityDistanceCalculator(_DistanceCalculator):
    """Sequence-identity based distance: 0 = sequences identical,
    1 = sequences differ.
    """

    def __init__(self, cutoff: float = 0, n_jobs: Union[int, None] = None):
        """For this calculator the cutoff is 0 by definition; the `cutoff`
        argument is therefore ignored."""
        super().__init__(cutoff, n_jobs)

    def calc_dist_mat(self, seqs: np.ndarray) -> coo_matrix:
        """Identical sequences have offsetted distance 1 and everything else
        is dropped, so the result is simply the identity matrix."""
        n_seqs = len(seqs)
        return scipy.sparse.identity(n_seqs, dtype=self.DTYPE, format="coo")
class _LevenshteinDistanceCalculator(_DistanceCalculator):
    """Computes pairwise Levenshtein (i.e. edit) distances between sequences."""

    def _compute_row(self, seqs: np.ndarray, i_row: int) -> coo_matrix:
        """Build row `i_row` of the upper-triangular distance matrix by
        comparing `seqs[i_row]` against `seqs[i_row:]`."""
        reference = seqs[i_row]
        matches = []
        for j, other in enumerate(seqs[i_row:], start=i_row):
            dist = levenshtein_dist(reference, other)
            if dist <= self.cutoff:
                # +1 offset so that d == 0 is distinguishable from "no entry"
                matches.append((dist + 1, j))
        d, col = zip(*matches)
        row = np.zeros(len(col), dtype="int")
        return coo_matrix((d, (row, col)), dtype=self.DTYPE, shape=(1, seqs.size))

    def calc_dist_mat(self, seqs: np.ndarray) -> csr_matrix:
        """Assemble the full upper-triangular distance matrix, one row
        per worker task."""
        worker_pool = Pool(self.n_jobs)
        row_blocks = worker_pool.starmap_progress(
            self._compute_row,
            zip(itertools.repeat(seqs), range(len(seqs))),
            chunksize=200,
            total=len(seqs),
        )
        worker_pool.close()
        score_mat = scipy.sparse.vstack(row_blocks)
        score_mat.eliminate_zeros()
        assert score_mat.shape[0] == score_mat.shape[1]
        return score_mat
class _AlignmentDistanceCalculator(_DistanceCalculator):
    """Calculates distance between sequences based on pairwise sequence alignment.

    The distance between two sequences is defined as $S_{1,2}^{max} - S_{1,2}$
    where $S_{1,2}$ is the alignment score of sequences 1 and 2 and $S_{1,2}^{max}$
    is the max. achievable alignment score of sequences 1 and 2 defined as
    $\\min(S_{1,1}, S_{2,2})$.
    """

    def __init__(
        self,
        cutoff: float,
        n_jobs: Union[int, None] = None,
        *,
        subst_mat: str = "blosum62",
        gap_open: int = 11,
        gap_extend: int = 1,
    ):
        """Class to generate pairwise alignment distances.

        High-performance sequence alignment through parasail library [Daily2016]_

        Parameters
        ----------
        cutoff
            see `_DistanceCalculator`
        n_jobs
            see `_DistanceCalculator`
        subst_mat
            Name of parasail substitution matrix
        gap_open
            Gap open penalty
        gap_extend
            Gap extend penalty
        """
        super().__init__(cutoff, n_jobs)
        self.subst_mat = subst_mat
        self.gap_open = gap_open
        self.gap_extend = gap_extend

    def _align_row(
        self, seqs: np.ndarray, self_alignment_scores: np.array, i_row: int
    ) -> np.ndarray:
        """Generates a row of the triangular distance matrix.

        Aligns `seqs[i_row]` with all other sequences in `seqs[i_row:]`.

        Parameters
        ----------
        seqs
            Array of amino acid sequences
        self_alignment_scores
            Array containing the scores of aligning each sequence in `seqs`
            with itself. This is used as a reference value to turn
            alignment scores into distances.
        i_row
            Index of the row in the final distance matrix. Determines the target sequence.

        Returns
        -------
        The i_th row of the final score matrix.
        """
        subst_mat = parasail.Matrix(self.subst_mat)
        target = seqs[i_row]
        # pre-compute the query profile once; it is reused for every alignment
        # of `target` against the remaining sequences
        profile = parasail.profile_create_16(target, subst_mat)

        def coord_generator():
            # yields (distance + 1, column) pairs for all hits <= cutoff;
            # the +1 offset distinguishes d == 0 from "no entry" (see base class)
            for j, s2 in enumerate(seqs[i_row:], start=i_row):
                r = parasail.nw_scan_profile_16(
                    profile, s2, self.gap_open, self.gap_extend
                )
                # the max. achievable score is bounded by the self-alignment
                # score of the weaker of the two sequences
                max_score = np.min(self_alignment_scores[[i_row, j]])
                d = max_score - r.score
                if d <= self.cutoff:
                    yield d + 1, j

        d, col = zip(*coord_generator())
        row = np.zeros(len(col), dtype="int")
        return coo_matrix((d, (row, col)), dtype=self.DTYPE, shape=(1, len(seqs)))

    def calc_dist_mat(self, seqs: Collection) -> coo_matrix:
        """Calculate the distances between amino acid sequences based on
        of all-against-all pairwise sequence alignments.

        Parameters
        ----------
        seqs
            Array of amino acid sequences

        Returns
        -------
        Upper diagonal distance matrix of normalized alignment distances.
        """
        # first, calculate self-alignments. We need them as reference values
        # to turn scores into dists
        self_alignment_scores = np.array(
            [
                parasail.nw_scan_16(
                    s,
                    s,
                    self.gap_open,
                    self.gap_extend,
                    parasail.Matrix(self.subst_mat),
                ).score
                for s in seqs
            ]
        )
        p = Pool(self.n_jobs)
        # one worker task per row of the (upper triangular) distance matrix
        rows = p.starmap_progress(
            self._align_row,
            zip(
                itertools.repeat(seqs),
                itertools.repeat(self_alignment_scores),
                range(len(seqs)),
            ),
            chunksize=200,
            total=len(seqs),
        )
        p.close()
        score_mat = scipy.sparse.vstack(rows)
        score_mat.eliminate_zeros()
        assert score_mat.shape[0] == score_mat.shape[1]
        return score_mat
def tcr_dist(
    unique_seqs,
    *,
    metric: Union[
        Literal["alignment", "identity", "levenshtein"], _DistanceCalculator
    ] = "identity",
    cutoff: float = 2,
    n_jobs: Union[int, None] = None,
):
    """Compute the (sequence x sequence) distance matrix for `unique_seqs`.

    `metric` may be one of the predefined metric names, or an arbitrary
    `_DistanceCalculator` instance which is then used as-is.
    """
    if isinstance(metric, _DistanceCalculator):
        calculator = metric
    elif metric == "alignment":
        calculator = _AlignmentDistanceCalculator(cutoff=cutoff, n_jobs=n_jobs)
    elif metric == "identity":
        # the identity calculator ignores the cutoff by definition
        calculator = _IdentityDistanceCalculator(cutoff=cutoff)
    elif metric == "levenshtein":
        calculator = _LevenshteinDistanceCalculator(cutoff=cutoff, n_jobs=n_jobs)
    else:
        raise ValueError("Invalid distance metric.")
    return calculator.calc_dist_mat(unique_seqs)
class TcrNeighbors:
    """Computes a cell x cell neighborhood graph from CDR3 sequence distances.

    Builds per-arm sequence indices, computes sequence-level distance
    matrices, and reduces them to a cell-level distance matrix according
    to the chosen `receptor_arms` / `dual_tcr` merging strategies.
    """

    def __init__(
        self,
        adata: AnnData,
        *,
        metric: Literal["alignment", "identity", "levenshtein"] = "identity",
        cutoff: float = 0,
        receptor_arms: Literal["TRA", "TRB", "all", "any"] = "all",
        dual_tcr: Literal["primary_only", "all", "any"] = "primary_only",
        sequence: Literal["aa", "nt"] = "aa",
    ):
        """Class to compute Neighborhood graphs of CDR3 sequences.

        For documentation of the parameters, see :func:`tcr_neighbors`.
        """
        if metric == "identity" and cutoff != 0:
            raise ValueError("Identity metric only works with cutoff = 0")
        if sequence == "nt" and metric == "alignment":
            raise ValueError(
                "Using nucleotide sequences with alignment metric is not supported. "
            )
        self.adata = adata
        self.metric = metric
        self.cutoff = cutoff
        self.receptor_arms = receptor_arms
        self.dual_tcr = dual_tcr
        self.sequence = sequence
        self._build_index_dict()
        # populated by compute_distances()
        self._dist_mat = None
        logging.debug("Finished initalizing TcrNeighbors object. ")

    @staticmethod
    def _seq_to_cell_idx(
        unique_seqs: np.ndarray, cdr_seqs: np.ndarray
    ) -> Dict[int, List[int]]:
        """
        Compute sequence to cell index for a single chain (e.g. `TRA_1`).

        Maps seq_idx -> [list, of, cell_idx].
        Useful to build a cell x cell matrix from a seq x seq matrix.
        Computes magic lookup indexes in linear time.

        Parameters
        ----------
        unique_seqs
            Pool of all unique cdr3 sequences (length = #unique cdr3 sequences)
        cdr_seqs
            CDR3 sequences for the current chain (length = #cells)

        Returns
        -------
        Sequence2Cell mapping
        """
        # 1) reverse mapping of amino acid sequence to index in sequence-distance matrix
        seq_to_index = {seq: i for i, seq in enumerate(unique_seqs)}
        # 2) indices of cells in adata that have a CDR3 sequence.
        cells_with_chain = np.where(~_is_na(cdr_seqs))[0]
        # 3) indices of the corresponding sequences in the distance matrix.
        seq_inds = {
            chain_id: seq_to_index[cdr_seqs[chain_id]] for chain_id in cells_with_chain
        }
        # 4) list of cell-indices in the cell distance matrix for each sequence
        seq_to_cell = {seq_id: list() for seq_id in seq_to_index.values()}
        for cell_id in cells_with_chain:
            seq_id = seq_inds[cell_id]
            seq_to_cell[seq_id].append(cell_id)
        return seq_to_cell

    def _build_index_dict(self):
        """Build nested dictionary for each receptor arm (TRA, TRB) containing all
        combinations of receptor_arms x primary/secondary_chain.

        If the merge mode for either `receptor_arm` or `dual_tcr` is `all`,
        includes a lookup table that contains the number of CDR3 sequences for
        each cell.
        """
        # restrict to a single arm only when the user asked for "TRA" or "TRB"
        receptor_arms = (
            ["TRA", "TRB"]
            if self.receptor_arms not in ["TRA", "TRB"]
            else [self.receptor_arms]
        )
        chain_inds = [1] if self.dual_tcr == "primary_only" else [1, 2]
        # column suffix: amino-acid columns have no suffix, nucleotide ones "_nt"
        sequence = "" if self.sequence == "aa" else "_nt"
        arm_dict = {}
        for arm in receptor_arms:
            cdr_seqs = {
                k: self.adata.obs[f"{arm}_{k}_cdr3{sequence}"].values
                for k in chain_inds
            }
            unique_seqs = np.hstack(list(cdr_seqs.values()))
            unique_seqs = np.unique(unique_seqs[~_is_na(unique_seqs)]).astype(str)
            seq_to_cell = {
                k: self._seq_to_cell_idx(unique_seqs, cdr_seqs[k]) for k in chain_inds
            }
            arm_dict[arm] = {
                "chain_inds": chain_inds,
                "unique_seqs": unique_seqs,
                "seq_to_cell": seq_to_cell,
            }
            # need the count of chains per cell for the `all` strategies.
            if self.receptor_arms == "all" or self.dual_tcr == "all":
                arm_dict[arm]["chains_per_cell"] = np.sum(
                    ~_is_na(
                        self.adata.obs.loc[
                            :, [f"{arm}_{k}_cdr3{sequence}" for k in chain_inds]
                        ]
                    ),
                    axis=1,
                )
        self.index_dict = arm_dict

    def _reduce_dual_all(self, d, chain, cell_row, cell_col):
        """Reduce dual TCRs into a single value when 'all' sequences
        need to match. This requires additional checking effort for the number
        of chains in the given cell, since we can't make the distinction between
        no chain and dist > cutoff based on the distances (both would contain a
        0 in the distance matrix)."""
        chain_count = (
            self.index_dict[chain]["chains_per_cell"][cell_row],
            self.index_dict[chain]["chains_per_cell"][cell_col],
        )
        if len(d) == 1 and chain_count == (1, 1):
            # exactly one chain for both cells -> return that value
            return next(iter(d.values()))
        elif chain_count == (2, 2):
            # two options: either (1 matches 2 and 2 matches 1)
            # or (1 matches 1 and 2 matches 2).
            try:
                # minus 1, because both dists are offseted by 1.
                d1 = d[(1, 2)] + d[(2, 1)] - 1
            except KeyError:
                d1 = None
            try:
                d2 = d[(1, 1)] + d[(2, 2)] - 1
            except KeyError:
                d2 = None
            if d1 is not None and d2 is not None:
                return min(d1, d2)
            elif d1 is not None:
                return d1
            elif d2 is not None:
                return d2
            else:
                # neither chain pairing matched completely -> no edge
                return 0
        else:
            return 0

    def _reduce_arms_all(self, values, cell_row, cell_col):
        """Reduce multiple receptor arms into a single value when 'all' sequences
        need to match. This requires additional checking effort for the number
        of chains in the given cell, since we can't make the distinction between
        no chain and dist > cutoff based on the distances (both would contain a
        0 in the distance matrix)."""
        values = (x for x in values if x != 0)
        try:
            arm1 = next(values)
        except StopIteration:
            # no value > 0
            return 0
        try:
            arm2 = next(values)
            # two receptor arms -> easy
            # -1 because both distances are offseted by 1
            return arm1 + arm2 - 1
        except StopIteration:
            # only one arm
            tra_chains = (
                self.index_dict["TRA"]["chains_per_cell"][cell_row],
                self.index_dict["TRA"]["chains_per_cell"][cell_col],
            )
            trb_chains = (
                self.index_dict["TRB"]["chains_per_cell"][cell_row],
                self.index_dict["TRB"]["chains_per_cell"][cell_col],
            )
            # Either exactly one chain for TRA or
            # exactly one chain for TRB for both cells.
            if tra_chains == (0, 0) or trb_chains == (0, 0):
                return arm1
            else:
                return 0

    @staticmethod
    def _reduce_arms_any(lst, *args):
        """Reduce arms when *any* of the sequences needs to match.
        This is the simpler case. This also works with only one entry
        (e.g. arms = "TRA") """
        # need to exclude 0 values, since the dist mat is offseted by 1.
        try:
            return min(x for x in lst if x != 0)
        except ValueError:
            # no values in generator
            return 0

    @staticmethod
    def _reduce_dual_any(d, *args):
        """Reduce dual tcrs to a single value when *any* of the sequences needs
        to match (by minimum). This also works with only one entry (i.e. 'primary only')
        """
        # need to exclude 0 values, since the dist mat is offseted by 1.
        try:
            return min(x for x in d.values() if x != 0)
        except ValueError:
            # no values in generator
            return 0

    def _reduce_coord_dict(self, coord_dict):
        """Applies reduction functions to the coord dict.
        Yield (coords, value) pairs. """
        # pick the reduction strategies once, outside the loop
        reduce_dual = (
            self._reduce_dual_all if self.dual_tcr == "all" else self._reduce_dual_any
        )
        reduce_arms = (
            self._reduce_arms_all
            if self.receptor_arms == "all"
            else self._reduce_arms_any
        )
        for (cell_row, cell_col), entry in coord_dict.items():
            # dual chains are collapsed first, then the receptor arms
            reduced_dual = (
                reduce_dual(value_dict, chain, cell_row, cell_col)
                for chain, value_dict in entry.items()
            )
            reduced = reduce_arms(reduced_dual, cell_row, cell_col,)
            yield (cell_row, cell_col), reduced

    def _cell_dist_mat_reduce(self):
        """Compute the distance matrix by using custom reduction functions.
        More flexible than `_build_cell_dist_mat_min`, but requires more memory.

        Reduce dual is called before reduce arms.
        """
        # (cell_row, cell_col) -> {arm -> {(chain1, chain2) -> dist}}
        coord_dict = dict()

        def _add_to_dict(d, c1, c2, cell_row, cell_col, value):
            """Add a value to the nested coord dict"""
            # NOTE: `arm` is taken from the enclosing loop below.
            # NOTE(review): the inner `except KeyError` branch looks
            # unreachable (a membership test cannot raise KeyError), and
            # rebinding `tmp_dict2` there would not update `tmp_dict` —
            # verify intent before touching this.
            try:
                tmp_dict = d[(cell_row, cell_col)]
                try:
                    tmp_dict2 = tmp_dict[arm]
                    try:
                        if (c1, c2) in tmp_dict2:
                            # can be in arbitrary order apparently
                            assert (c2, c1) not in tmp_dict2
                            tmp_dict2[(c2, c1)] = value
                            tmp_dict2[(c1, c2)] = value
                    except KeyError:
                        tmp_dict2 = {(c1, c2): value}
                except KeyError:
                    tmp_dict[arm] = {(c1, c2): value}
            except KeyError:
                d[(cell_row, cell_col)] = {arm: {(c1, c2): value}}

        for arm, arm_info in self.index_dict.items():
            dist_mat, seq_to_cell, chain_inds = (
                arm_info["dist_mat"],
                arm_info["seq_to_cell"],
                arm_info["chain_inds"],
            )
            # expand every non-zero seq x seq entry to all cell pairs that
            # carry those sequences (for every chain combination)
            for row, col, value in zip(dist_mat.row, dist_mat.col, dist_mat.data):
                for c1, c2 in itertools.product(chain_inds, repeat=2):
                    for cell_row, cell_col in itertools.product(
                        seq_to_cell[c1][row], seq_to_cell[c2][col]
                    ):
                        # fill upper diagonal. Important: these are dist-mat row,cols
                        # not cell-mat row cols. This is required, because the
                        # itertools.product returns all combinations for the diagonal
                        # but not for the other values.
                        _add_to_dict(coord_dict, c1, c2, cell_row, cell_col, value)
                        if row != col:
                            _add_to_dict(coord_dict, c1, c2, cell_col, cell_row, value)
        logging.debug("Finished constructing coord-dictionary")
        yield from self._reduce_coord_dict(coord_dict)

    def compute_distances(
        self, n_jobs: Union[int, None] = None,
    ):
        """Computes the distances between CDR3 sequences

        Parameters
        ----------
        n_jobs
            Number of CPUs to use for alignment and levenshtein distance.
            Default: use all CPUS.
        """
        # 1) per-arm (sequence x sequence) distance matrices
        for arm, arm_dict in self.index_dict.items():
            arm_dict["dist_mat"] = tcr_dist(
                arm_dict["unique_seqs"],
                metric=self.metric,
                cutoff=self.cutoff,
                n_jobs=n_jobs,
            )
            logging.info("Finished computing {} pairwise distances.".format(arm))
        # 2) reduce to a (cell x cell) distance matrix
        coords, values = zip(*self._cell_dist_mat_reduce())
        rows, cols = zip(*coords)
        dist_mat = coo_matrix(
            (values, (rows, cols)), shape=(self.adata.n_obs, self.adata.n_obs)
        )
        logging.info("Finished constructing cell x cell distance matrix. ")
        dist_mat.eliminate_zeros()
        self._dist_mat = dist_mat.tocsr()

    @property
    def dist(self):
        """The computed distance matrix.

        Requires to invoke `compute_distances()` first."""
        return self._dist_mat

    @property
    def connectivities(self):
        """Get the weighted adjacency matrix derived from the distance matrix.

        The cutoff will be used to normalize the distances.
        """
        if self.cutoff == 0:
            return self._dist_mat
        connectivities = self._dist_mat.copy()
        # actual distances (undo the +1 offset)
        d = connectivities.data - 1
        # structure of the matrix stays the same, we can safely change the data only
        connectivities.data = (self.cutoff - d) / self.cutoff
        connectivities.eliminate_zeros()
        return connectivities
def tcr_neighbors(
    adata: AnnData,
    *,
    metric: Literal["identity", "alignment", "levenshtein"] = "alignment",
    cutoff: int = 2,
    receptor_arms: Literal["TRA", "TRB", "all", "any"] = "all",
    dual_tcr: Literal["primary_only", "any", "all"] = "primary_only",
    key_added: str = "tcr_neighbors",
    sequence: Literal["aa", "nt"] = "aa",
    inplace: bool = True,
    n_jobs: Union[int, None] = None,
) -> Union[Tuple[csr_matrix, csr_matrix], None]:
    """Construct a cell x cell neighborhood graph based on CDR3 sequence
    similarity.

    Parameters
    ----------
    adata
        annotated data matrix
    metric
        "identity" = Calculate 0/1 distance based on sequence identity.
        Equals a cutoff of 0.
        "alignment" - Calculate distance using pairwise sequence alignment
        and BLOSUM62 matrix
        "levenshtein" - Levenshtein edit distance
    cutoff
        Two cells with a distance <= the cutoff will be connected.
        If cutoff = 0, the CDR3 sequences need to be identical. In this
        case, no alignment is performed.
    receptor_arms:
        "TRA" - only consider TRA sequences
        "TRB" - only consider TRB sequences
        "all" - both TRA and TRB need to match
        "any" - either TRA or TRB need to match
    dual_tcr:
        "primary_only" - only consider most abundant pair of TRA/TRB chains
        "any" - consider both pairs of TRA/TRB sequences. Distance must be
        below cutoff for any of the chains.
        "all" - consider both pairs of TRA/TRB sequences. Distance must be
        below cutoff for all of the chains.
    key_added:
        dict key under which the result will be stored in `adata.uns["scirpy"]`
        when `inplace` is True.
    sequence:
        Use amino acid (aa) or nulceotide (nt) sequences to define clonotype?
    inplace:
        If True, store the results in adata.uns. If False, returns
        the results.
    n_jobs:
        Number of cores to use for alignment and levenshtein distance.

    Returns
    -------
    connectivities
        weighted adjacency matrix
    dist
        cell x cell distance matrix with the distances as computed according
        to `metric`, offsetted by 1 to make use of sparse matrices.
    """
    # a cutoff of 0 implies exact sequence identity; skip alignment entirely
    if cutoff == 0:
        metric = "identity"
    neighbors = TcrNeighbors(
        adata,
        metric=metric,
        cutoff=cutoff,
        receptor_arms=receptor_arms,
        dual_tcr=dual_tcr,
        sequence=sequence,
    )
    neighbors.compute_distances(n_jobs)
    logging.debug("Finished converting distances to connectivities. ")
    if not inplace:
        return neighbors.connectivities, neighbors.dist
    res = {
        "params": {
            "metric": metric,
            "cutoff": cutoff,
            "dual_tcr": dual_tcr,
            "receptor_arms": receptor_arms,
        }
    }
    res["connectivities"] = neighbors.connectivities
    res["distances"] = neighbors.dist
    adata.uns[key_added] = res
| [
"parasail.Matrix",
"scanpy.logging.debug",
"parasail.nw_scan_profile_16",
"itertools.product",
"numpy.min",
"Levenshtein.distance",
"scipy.sparse.coo_matrix",
"scanpy.logging.info",
"parasail.profile_create_16",
"itertools.repeat"
] | [((24130, 24196), 'scanpy.logging.debug', 'logging.debug', (['"""Finished converting distances to connectivities. """'], {}), "('Finished converting distances to connectivities. ')\n", (24143, 24196), False, 'from scanpy import logging\n'), ((2977, 3044), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(d, (row, col))'], {'dtype': 'self.DTYPE', 'shape': '(1, seqs.size)'}), '((d, (row, col)), dtype=self.DTYPE, shape=(1, seqs.size))\n', (2987, 3044), False, 'from scipy.sparse import coo_matrix, csr_matrix, lil_matrix\n'), ((5560, 5591), 'parasail.Matrix', 'parasail.Matrix', (['self.subst_mat'], {}), '(self.subst_mat)\n', (5575, 5591), False, 'import parasail\n'), ((5639, 5684), 'parasail.profile_create_16', 'parasail.profile_create_16', (['target', 'subst_mat'], {}), '(target, subst_mat)\n', (5665, 5684), False, 'import parasail\n'), ((9561, 9620), 'scanpy.logging.debug', 'logging.debug', (['"""Finished initalizing TcrNeighbors object. """'], {}), "('Finished initalizing TcrNeighbors object. ')\n", (9574, 9620), False, 'from scanpy import logging\n'), ((19682, 19737), 'scanpy.logging.debug', 'logging.debug', (['"""Finished constructing coord-dictionary"""'], {}), "('Finished constructing coord-dictionary')\n", (19695, 19737), False, 'from scanpy import logging\n'), ((20564, 20642), 'scipy.sparse.coo_matrix', 'coo_matrix', (['(values, (rows, cols))'], {'shape': '(self.adata.n_obs, self.adata.n_obs)'}), '((values, (rows, cols)), shape=(self.adata.n_obs, self.adata.n_obs))\n', (20574, 20642), False, 'from scipy.sparse import coo_matrix, csr_matrix, lil_matrix\n'), ((20673, 20740), 'scanpy.logging.info', 'logging.info', (['"""Finished constructing cell x cell distance matrix. """'], {}), "('Finished constructing cell x cell distance matrix. 
')\n", (20685, 20740), False, 'from scanpy import logging\n'), ((2773, 2801), 'Levenshtein.distance', 'levenshtein_dist', (['target', 's2'], {}), '(target, s2)\n', (2789, 2801), True, 'from Levenshtein import distance as levenshtein_dist\n'), ((3219, 3241), 'itertools.repeat', 'itertools.repeat', (['seqs'], {}), '(seqs)\n', (3235, 3241), False, 'import itertools\n'), ((5800, 5872), 'parasail.nw_scan_profile_16', 'parasail.nw_scan_profile_16', (['profile', 's2', 'self.gap_open', 'self.gap_extend'], {}), '(profile, s2, self.gap_open, self.gap_extend)\n', (5827, 5872), False, 'import parasail\n'), ((5939, 5980), 'numpy.min', 'np.min', (['self_alignment_scores[[i_row, j]]'], {}), '(self_alignment_scores[[i_row, j]])\n', (5945, 5980), True, 'import numpy as np\n'), ((7255, 7277), 'itertools.repeat', 'itertools.repeat', (['seqs'], {}), '(seqs)\n', (7271, 7277), False, 'import itertools\n'), ((7295, 7334), 'itertools.repeat', 'itertools.repeat', (['self_alignment_scores'], {}), '(self_alignment_scores)\n', (7311, 7334), False, 'import itertools\n'), ((18959, 18998), 'itertools.product', 'itertools.product', (['chain_inds'], {'repeat': '(2)'}), '(chain_inds, repeat=2)\n', (18976, 18998), False, 'import itertools\n'), ((19046, 19107), 'itertools.product', 'itertools.product', (['seq_to_cell[c1][row]', 'seq_to_cell[c2][col]'], {}), '(seq_to_cell[c1][row], seq_to_cell[c2][col])\n', (19063, 19107), False, 'import itertools\n'), ((7016, 7047), 'parasail.Matrix', 'parasail.Matrix', (['self.subst_mat'], {}), '(self.subst_mat)\n', (7031, 7047), False, 'import parasail\n')] |
# =========================================================================
# (c) Copyright 2019
# All rights reserved
# Programs written by <NAME>
# Department of Computer Science
# New Jersey Institute of Technology
# University Heights, Newark, NJ 07102, USA
#
# Permission to use, copy, modify, and distribute this
# software and its documentation for any purpose and without
# fee is hereby granted, provided that this copyright
# notice appears in all copies. Programmer(s) makes no
# representations about the suitability of this
# software for any purpose. It is provided "as is" without
# express or implied warranty.
# =========================================================================
import pandas as pd
from keras.utils import np_utils
from sklearn.preprocessing import LabelEncoder
from sklearn.utils import class_weight
from keras.models import *
from keras.layers import *
import csv
import sys
import numpy as np
import os
# Silence TensorFlow's native (C++) log output before TF is imported
# (level 3 = errors only).
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
try:
    import tensorflow as tf
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)
except Exception:
    # Best effort only: some TF versions lack this API, or TF may be absent.
    print('turn off logging is not supported')
# data_mean = [9.26048596e+02, 2.58664488e-01, 1.06633638e+02, 5.11085855e-01,
# 6.24011676e+02, 2.04194132e+23, 2.33909547e+01, 1.90406673e+13,
# 3.32675181e+00, 1.32545323e+22, 5.96746073e+03, 1.85633869e-02,
# 2.19502276e-06, 4.75747286e+12]
# data_std = [1.08065295e+03, 7.01180865e-01, 2.02918470e+02, 1.41554237e+00,
# 6.55378540e+02, 3.35568384e+23, 1.57301570e+01, 2.08734375e+13,
# 7.34233192e+00, 1.44636883e+22, 4.10534455e+03, 1.20567656e-01,
# 1.74265529e-05, 7.53463926e+12]
# data_max = [1.42231700e+04, 9.61858226e+00, 4.05506900e+03, 1.80000000e+01,
# 7.21147559e+03, 5.36934000e+24, 7.66870000e+01, 4.81340300e+14,
# 8.70000000e+01, 2.07016000e+23, 2.80475700e+04, 1.05571716e+01,
# 9.30000000e-04, 1.08546200e+14]
# data_min = [2.720000e-01, 0.000000e+00, 1.000000e-03, 0.000000e+00, 6.860500e-02,
# 3.117358e+19, 1.800000e-02, 9.951892e+09, 0.000000e+00, 3.300907e+18,
# 5.640048e+02, 0.000000e+00, 0.000000e+00, 7.529357e+07]
def _has_zero_record(row, flare_label):
    """Return True if any required physical feature in `row` is missing.

    A value of exactly 0.0 in the checked columns is treated as a missing
    record. Only the 'M' label scheme defines required columns; for any
    other `flare_label` no check is performed.
    """
    if flare_label == 'M':
        # column groups required for M-class samples
        for k in list(range(5, 10)) + list(range(13, 16)) + [19, 21] + list(range(23, 26)):
            if float(row[k]) == 0.0:
                return True
    return False


def load_data(datafile, flare_label, series_len, start_feature, n_features, mask_value):
    """Load per-active-region time series samples from a CSV file.

    For every usable row, a series of `series_len` consecutive records of
    the same NOAA active region (looking backwards in time) is assembled.
    Time steps without a usable record are padded with `mask_value`.

    Parameters
    ----------
    datafile : str
        Path of the CSV file to read.
    flare_label : str
        Label scheme; 'M' collapses X->M and B/C->N.
    series_len : int
        Number of time steps per sample.
    start_feature : int
        Column index of the first physical feature.
    n_features : int
        Number of physical feature columns.
    mask_value : number
        Value used to pad missing time steps.

    Returns
    -------
    (X, y) : tuple of np.ndarray
        X has shape (n_samples, series_len, n_features); y holds the labels.
    """
    df = pd.read_csv(datafile)
    df_values = df.values
    X = []
    y = []
    # padding row used for time steps without a usable record
    mask_row = [mask_value] * n_features
    for idx in range(0, len(df_values)):
        each_series_data = []
        row = df_values[idx]
        label = row[1][0]
        # collapse labels for the binary M-vs-N classification scheme
        if flare_label == 'M' and label == 'X':
            label = 'M'
        if flare_label == 'M' and (label == 'B' or label == 'C'):
            label = 'N'
        # if at least one of the physical feature values is missing, discard it
        if _has_zero_record(row, flare_label):
            continue
        cur_noaa_num = int(row[3])
        each_series_data.append(row[start_feature:start_feature + n_features].tolist())
        # walk backwards through earlier records of the same active region
        itr_idx = idx - 1
        while itr_idx >= 0 and len(each_series_data) < series_len:
            prev_row = df_values[itr_idx]
            prev_noaa_num = int(prev_row[3])
            if prev_noaa_num != cur_noaa_num:
                break
            # BUG FIX: the zero-record check must inspect `prev_row` (the
            # record being inserted); the original checked `row`, which was
            # already known to be complete, so missing earlier records were
            # never masked.
            if _has_zero_record(prev_row, flare_label):
                each_series_data.insert(0, mask_row)
            else:
                each_series_data.insert(0, prev_row[start_feature:start_feature + n_features].tolist())
            itr_idx -= 1
        # left-pad with mask rows if the region history is too short
        while 0 < len(each_series_data) < series_len:
            each_series_data.insert(0, mask_row)
        if len(each_series_data) > 0:
            X.append(np.array(each_series_data).reshape(series_len, n_features).tolist())
            y.append(label)
    X_arr = np.array(X)
    y_arr = np.array(y)
    print(X_arr.shape)
    return X_arr, y_arr
def data_transform(data):
    """Encode string class labels as one-hot vectors.

    `data` is first mapped to integer codes with a LabelEncoder, and the
    codes are then expanded to a one-hot (categorical) matrix.
    """
    label_encoder = LabelEncoder()
    label_encoder.fit(data)
    integer_codes = label_encoder.transform(data)
    one_hot = np_utils.to_categorical(integer_codes)
    return one_hot
def attention_3d_block(hidden_states, series_len):
    """Attention over the time axis of an RNN output.

    Scores every time step against the last hidden state, softmax-normalizes
    the scores, builds a context vector, and merges it with the last hidden
    state through a tanh projection.
    """
    hidden_size = int(hidden_states.shape[2])
    states_t = Permute((2, 1), name='attention_input_t')(hidden_states)
    states_t = Reshape((hidden_size, series_len), name='attention_input_reshape')(states_t)
    score_part = Dense(series_len, use_bias=False, name='attention_score_vec')(states_t)
    score_part_t = Permute((2, 1), name='attention_score_vec_t')(score_part)
    last_state = Lambda(lambda x: x[:, :, -1], output_shape=(hidden_size, 1), name='last_hidden_state')(states_t)
    score = dot([score_part_t, last_state], [2, 1], name='attention_score')
    weights = Activation('softmax', name='attention_weight')(score)
    context = dot([states_t, weights], [2, 1], name='context_vector')
    context = Reshape((hidden_size,))(context)
    last_state = Reshape((hidden_size,))(last_state)
    merged = concatenate([context, last_state], name='attention_output')
    return Dense(hidden_size, use_bias=False, activation='tanh', name='attention_vector')(merged)
def lstm(nclass, n_features, series_len):
inputs = Input(shape=(series_len, n_features,))
lstm_out = LSTM(10, return_sequences=True, dropout=0.5)(inputs)
attention_mul = attention_3d_block(lstm_out, series_len)
layer1_out = Dense(200, activation='relu')(attention_mul)
layer2_out = Dense(500, activation='relu')(layer1_out)
output = Dense(nclass, activation='softmax', activity_regularizer=regularizers.l2(0.0001))(layer2_out)
model = Model(input=[inputs], output=output)
return model
if __name__ == '__main__':
flare_label = sys.argv[1]
train_again = int(sys.argv[2])
filepath = './'
n_features = 0
if flare_label == 'M':
n_features = 22
start_feature = 5
mask_value = 0
series_len = 10
epochs = 7
batch_size = 256
nclass = 2
result_file = './output.csv'
if train_again == 1:
# Train
X_train_data, y_train_data = load_data(datafile=filepath + 'normalized_training.csv',
flare_label=flare_label, series_len=series_len,
start_feature=start_feature, n_features=n_features,
mask_value=mask_value)
X_train = np.array(X_train_data)
y_train = np.array(y_train_data)
y_train_tr = data_transform(y_train)
class_weights = class_weight.compute_class_weight('balanced',
np.unique(y_train), y_train)
class_weight_ = {0: class_weights[0], 1: class_weights[1]}
# print(class_weight_)
model = lstm(nclass, n_features, series_len)
model.compile(loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
history = model.fit(X_train, y_train_tr,
epochs=epochs, batch_size=batch_size,
verbose=False, shuffle=True, class_weight=class_weight_)
model.save('./model.h5')
else:
model = load_model('./model.h5')
# Test
X_test_data, y_test_data = load_data(datafile=filepath + 'normalized_testing.csv',
flare_label=flare_label, series_len=series_len,
start_feature=start_feature, n_features=n_features,
mask_value=mask_value)
X_test = np.array(X_test_data)
y_test = np.array(y_test_data)
y_test_tr = data_transform(y_test)
classes = model.predict(X_test, batch_size=batch_size, verbose=0, steps=None)
with open(result_file, 'w', encoding='UTF-8') as result_csv:
w = csv.writer(result_csv)
with open(filepath + 'normalized_testing.csv', encoding='UTF-8') as data_csv:
reader = csv.reader(data_csv)
i = -1
for line in reader:
if i == -1:
line.insert(0, 'Predicted Label')
else:
if classes[i][0] >= 0.6:
line.insert(0, 'Positive')
else:
line.insert(0, 'Negative')
i += 1
w.writerow(line)
| [
"sklearn.preprocessing.LabelEncoder",
"numpy.unique",
"pandas.read_csv",
"csv.writer",
"tensorflow.compat.v1.logging.set_verbosity",
"numpy.array",
"keras.utils.np_utils.to_categorical",
"csv.reader"
] | [((1054, 1116), 'tensorflow.compat.v1.logging.set_verbosity', 'tf.compat.v1.logging.set_verbosity', (['tf.compat.v1.logging.ERROR'], {}), '(tf.compat.v1.logging.ERROR)\n', (1088, 1116), True, 'import tensorflow as tf\n'), ((2370, 2391), 'pandas.read_csv', 'pd.read_csv', (['datafile'], {}), '(datafile)\n', (2381, 2391), True, 'import pandas as pd\n'), ((5521, 5532), 'numpy.array', 'np.array', (['X'], {}), '(X)\n', (5529, 5532), True, 'import numpy as np\n'), ((5545, 5556), 'numpy.array', 'np.array', (['y'], {}), '(y)\n', (5553, 5556), True, 'import numpy as np\n'), ((5646, 5660), 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), '()\n', (5658, 5660), False, 'from sklearn.preprocessing import LabelEncoder\n'), ((5743, 5777), 'keras.utils.np_utils.to_categorical', 'np_utils.to_categorical', (['encoded_Y'], {}), '(encoded_Y)\n', (5766, 5777), False, 'from keras.utils import np_utils\n'), ((9434, 9455), 'numpy.array', 'np.array', (['X_test_data'], {}), '(X_test_data)\n', (9442, 9455), True, 'import numpy as np\n'), ((9469, 9490), 'numpy.array', 'np.array', (['y_test_data'], {}), '(y_test_data)\n', (9477, 9490), True, 'import numpy as np\n'), ((8233, 8255), 'numpy.array', 'np.array', (['X_train_data'], {}), '(X_train_data)\n', (8241, 8255), True, 'import numpy as np\n'), ((8274, 8296), 'numpy.array', 'np.array', (['y_train_data'], {}), '(y_train_data)\n', (8282, 8296), True, 'import numpy as np\n'), ((9691, 9713), 'csv.writer', 'csv.writer', (['result_csv'], {}), '(result_csv)\n', (9701, 9713), False, 'import csv\n'), ((8471, 8489), 'numpy.unique', 'np.unique', (['y_train'], {}), '(y_train)\n', (8480, 8489), True, 'import numpy as np\n'), ((9821, 9841), 'csv.reader', 'csv.reader', (['data_csv'], {}), '(data_csv)\n', (9831, 9841), False, 'import csv\n'), ((5408, 5434), 'numpy.array', 'np.array', (['each_series_data'], {}), '(each_series_data)\n', (5416, 5434), True, 'import numpy as np\n')] |
# ######################################################################
# Copyright (c) 2014, Brookhaven Science Associates, Brookhaven #
# National Laboratory. All rights reserved. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided that the following conditions #
# are met: #
# #
# * Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# #
# * Redistributions in binary form must reproduce the above copyright #
# notice this list of conditions and the following disclaimer in #
# the documentation and/or other materials provided with the #
# distribution. #
# #
# * Neither the name of the Brookhaven Science Associates, Brookhaven #
# National Laboratory nor the names of its contributors may be used #
# to endorse or promote products derived from this software without #
# specific prior written permission. #
# #
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS #
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT #
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS #
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE #
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, #
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES #
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) #
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, #
# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING #
# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE #
# POSSIBILITY OF SUCH DAMAGE. #
########################################################################
"""
This is the module for calibration functions and data
"""
from __future__ import absolute_import, division, print_function
from collections import deque
import numpy as np
import scipy.signal
from .constants import calibration_standards
from .feature import (filter_peak_height, peak_refinement,
refine_log_quadratic)
from .utils import (angle_grid, radial_grid,
pairwise, bin_edges_to_centers, bin_1D)
def estimate_d_blind(name, wavelength, bin_centers, ring_average,
window_size, max_peak_count, thresh):
"""
Estimate the sample-detector distance
Given a radially integrated calibration image return an estimate for
the sample-detector distance. This function does not require a
rough estimate of what d should be.
For the peaks found the detector-sample distance is estimated via
.. math ::
D = \\frac{r}{\\tan 2\\theta}
where :math:`r` is the distance in mm from the calibrated center
to the ring on the detector and :math:`D` is the distance from
the sample to the detector.
Parameters
----------
name : str
The name of the calibration standard. Used to look up the
expected peak location
For valid options, see the name attribute on this function
wavelength : float
The wavelength of scattered x-ray in nm
bin_centers : array
The distance from the calibrated center to the center of
the ring's annulus in mm
ring_average : array
The average intensity in the given ring of a azimuthally integrated
powder pattern. In counts [arb]
window_size : int
The number of elements on either side of a local maximum to
use for locating and refining peaks. Candidates are identified
as a relative maximum in a window sized (2*window_size + 1) and
the same window is used for fitting the peaks to refine the location.
max_peak_count : int
Use at most this many peaks
thresh : float
Fraction of maximum peak height
Returns
-------
dist_sample : float
The detector-sample distance in mm. This is the mean of the estimate
from all of the peaks used.
std_dist_sample : float
The standard deviation of d computed from the peaks used.
"""
# get the calibration standard
cal = calibration_standards[name]
# find the local maximums
cands = scipy.signal.argrelmax(ring_average, order=window_size)[0]
# filter local maximums by size
cands = filter_peak_height(ring_average, cands,
thresh*np.max(ring_average), window=window_size)
# TODO insert peak identification validation. This might be better than
# improving the threshold value.
# refine the locations of the peaks
peaks_x, peaks_y = peak_refinement(bin_centers, ring_average, cands,
window_size, refine_log_quadratic)
# compute tan(2theta) for the expected peaks
tan2theta = np.tan(cal.convert_2theta(wavelength))
# figure out how many peaks we can look at
slc = slice(0, np.min([len(tan2theta), len(peaks_x), max_peak_count]))
# estimate the sample-detector distance for each of the peaks
d_array = (peaks_x[slc] / tan2theta[slc])
return np.mean(d_array), np.std(d_array)
# Set an attribute for the calibration names that are valid options. This
# attribute also aids in autowrapping into VisTrails
estimate_d_blind.name = list(calibration_standards)
def refine_center(image, calibrated_center, pixel_size, phi_steps, max_peaks,
thresh, window_size,
nx=None, min_x=None, max_x=None):
"""
Refines the location of the center of the beam.
This relies on being able to see the whole powder pattern.
Parameters
----------
image : ndarray
The image
calibrated_center : tuple
(row, column) the estimated center
pixel_size : tuple
(pixel_height, pixel_width)
phi_steps : int
How many regions to split the ring into, should be >10
max_peaks : int
Number of rings to look it
thresh : float
Fraction of maximum peak height
window_size : int, optional
The window size to use (in bins) to use when refining peaks
nx : int, optional
Number of bins to use for radial binning
min_x : float, optional
The minimum radius to use for radial binning
max_x : float, optional
The maximum radius to use for radial binning
Returns
-------
calibrated_center : tuple
The refined calibrated center.
"""
if nx is None:
nx = int(np.mean(image.shape) * 2)
phi = angle_grid(calibrated_center, image.shape, pixel_size).ravel()
r = radial_grid(calibrated_center, image.shape, pixel_size).ravel()
I = image.ravel()
phi_steps = np.linspace(-np.pi, np.pi, phi_steps, endpoint=True)
out = deque()
for phi_start, phi_end in pairwise(phi_steps):
mask = (phi <= phi_end) * (phi > phi_start)
out.append(bin_1D(r[mask], I[mask],
nx=nx, min_x=min_x, max_x=max_x))
out = list(out)
ring_trace = []
for bins, b_sum, b_count in out:
mask = b_sum > 10
avg = b_sum[mask] / b_count[mask]
bin_centers = bin_edges_to_centers(bins)[mask]
cands = scipy.signal.argrelmax(avg, order=window_size)[0]
# filter local maximums by size
cands = filter_peak_height(avg, cands, thresh*np.max(avg),
window=window_size)
ring_trace.append(bin_centers[cands[:max_peaks]])
tr_len = [len(rt) for rt in ring_trace]
mm = np.min(tr_len)
ring_trace = np.vstack([rt[:mm] for rt in ring_trace]).T
mean_dr = np.mean(ring_trace - np.mean(ring_trace, axis=1, keepdims=True),
axis=0)
phi_centers = bin_edges_to_centers(phi_steps)
delta = np.mean(np.diff(phi_centers))
# this is doing just one term of a Fourier series
# note that we have to convert _back_ to pixels from real units
# TODO do this with better integration/handle repeat better
col_shift = (np.sum(np.sin(phi_centers) * mean_dr) *
delta / (np.pi * pixel_size[1]))
row_shift = (np.sum(np.cos(phi_centers) * mean_dr) *
delta / (np.pi * pixel_size[0]))
return tuple(np.array(calibrated_center) +
np.array([row_shift, col_shift]))
| [
"numpy.mean",
"collections.deque",
"numpy.min",
"numpy.diff",
"numpy.max",
"numpy.array",
"numpy.linspace",
"numpy.vstack",
"numpy.cos",
"numpy.std",
"numpy.sin"
] | [((7431, 7483), 'numpy.linspace', 'np.linspace', (['(-np.pi)', 'np.pi', 'phi_steps'], {'endpoint': '(True)'}), '(-np.pi, np.pi, phi_steps, endpoint=True)\n', (7442, 7483), True, 'import numpy as np\n'), ((7494, 7501), 'collections.deque', 'deque', ([], {}), '()\n', (7499, 7501), False, 'from collections import deque\n'), ((8251, 8265), 'numpy.min', 'np.min', (['tr_len'], {}), '(tr_len)\n', (8257, 8265), True, 'import numpy as np\n'), ((5831, 5847), 'numpy.mean', 'np.mean', (['d_array'], {}), '(d_array)\n', (5838, 5847), True, 'import numpy as np\n'), ((5849, 5864), 'numpy.std', 'np.std', (['d_array'], {}), '(d_array)\n', (5855, 5864), True, 'import numpy as np\n'), ((8283, 8324), 'numpy.vstack', 'np.vstack', (['[rt[:mm] for rt in ring_trace]'], {}), '([rt[:mm] for rt in ring_trace])\n', (8292, 8324), True, 'import numpy as np\n'), ((8509, 8529), 'numpy.diff', 'np.diff', (['phi_centers'], {}), '(phi_centers)\n', (8516, 8529), True, 'import numpy as np\n'), ((5139, 5159), 'numpy.max', 'np.max', (['ring_average'], {}), '(ring_average)\n', (5145, 5159), True, 'import numpy as np\n'), ((8363, 8405), 'numpy.mean', 'np.mean', (['ring_trace'], {'axis': '(1)', 'keepdims': '(True)'}), '(ring_trace, axis=1, keepdims=True)\n', (8370, 8405), True, 'import numpy as np\n'), ((8949, 8976), 'numpy.array', 'np.array', (['calibrated_center'], {}), '(calibrated_center)\n', (8957, 8976), True, 'import numpy as np\n'), ((8996, 9028), 'numpy.array', 'np.array', (['[row_shift, col_shift]'], {}), '([row_shift, col_shift])\n', (9004, 9028), True, 'import numpy as np\n'), ((7220, 7240), 'numpy.mean', 'np.mean', (['image.shape'], {}), '(image.shape)\n', (7227, 7240), True, 'import numpy as np\n'), ((8071, 8082), 'numpy.max', 'np.max', (['avg'], {}), '(avg)\n', (8077, 8082), True, 'import numpy as np\n'), ((8741, 8760), 'numpy.sin', 'np.sin', (['phi_centers'], {}), '(phi_centers)\n', (8747, 8760), True, 'import numpy as np\n'), ((8848, 8867), 'numpy.cos', 'np.cos', (['phi_centers'], {}), 
'(phi_centers)\n', (8854, 8867), True, 'import numpy as np\n')] |
# Copyright 2018 Uber Technologies, Inc. All Rights Reserved.
# Modifications copyright (C) 2019 Intel Corporation
# Modifications copyright (C) 2020, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from distutils.version import LooseVersion
import inspect
import itertools
import os
import platform
import sys
import unittest
import warnings
import time
import json
from collections.abc import Iterable
import numpy as np
import pytest
import torch
import torch.nn as nn
import torch.nn.functional as F
import horovod.torch as hvd
# Make the shared test helpers in ../utils importable before importing them.
sys.path.append(os.path.join(os.path.dirname(__file__), os.pardir, 'utils'))
from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath

# Feature flag: True when running against PyTorch >= 1.5 (presumably gates
# version-dependent test paths later in this file — confirm against usage).
_1_5_api = LooseVersion(torch.__version__) >= LooseVersion('1.5.0')

# Tensor types the oneCCL backend supports; consulted by
# TorchTests.filter_supported_types when CCL_ROOT is set.
ccl_supported_types = {torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                       torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                       torch.DoubleTensor}

# Set environment variable for dynamic timeline API test
os.environ["HOROVOD_TIMELINE"] = "DYNAMIC"
class TorchTests(unittest.TestCase):
"""
Tests for ops in horovod.torch.
"""
def __init__(self, *args, **kwargs):
super(TorchTests, self).__init__(*args, **kwargs)
warnings.simplefilter('module')
def convert_cpu_fp16_to_fp32(self, *values):
# PyTorch doesn't support any CPU ops on FP16 tensors.
# In case we need to do ops, we will convert tensor to FP32 here.
result = []
for value in values:
if value.dtype in [torch.float16, torch.HalfTensor] and not value.is_cuda:
result.append(value.float())
else:
result.append(value)
return result
def cast_and_place(self, tensor, dtype):
if dtype.is_cuda:
return tensor.cuda(hvd.local_rank()).type(dtype)
return tensor.type(dtype)
def filter_supported_types(self, types):
if 'CCL_ROOT' in os.environ:
types = [t for t in types if t in ccl_supported_types]
return types
def test_gpu_required(self):
if not torch.cuda.is_available():
skip_or_fail_gpu_test(self, "No GPUs available")
@pytest.mark.skipif(platform.system() == 'Darwin', reason='Reinit not supported on macOS')
def test_horovod_reinit(self):
"""Test that Horovod can init -> shutdown -> init successfully."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Horovod cannot be re-initialized after shutdown when using MPI, so
# this test can only be done using the Gloo controller
self.skipTest("Gloo is not available")
hvd.init()
rank, size = hvd.rank(), hvd.size()
hvd.shutdown()
hvd.init()
rank2, size2 = hvd.rank(), hvd.size()
assert rank == rank2
assert size == size2
def test_horovod_is_initialized(self):
"""Test that is_initialized returned by hvd.is_initialized() is correct."""
hvd.init()
assert hvd.is_initialized()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
is_mpi = gloo_rank == -1
if is_mpi:
# Only applies for Gloo
self.skipTest("Gloo is not available")
hvd.shutdown()
assert not hvd.is_initialized()
hvd.init()
def test_horovod_rank(self):
"""Test that the rank returned by hvd.rank() is correct."""
mpi_rank, _ = mpi_env_rank_and_size()
gloo_rank = int(os.getenv('HOROVOD_RANK', -1))
# The mpi rank does not match gloo rank, we need to figure which one
# we are using to run the test.
is_mpi = gloo_rank == -1
hvd.init()
rank = hvd.rank()
if is_mpi:
assert mpi_rank == rank
else:
assert gloo_rank == rank
def test_horovod_size(self):
"""Test that the size returned by hvd.size() is correct."""
_, mpi_size = mpi_env_rank_and_size()
gloo_size = int(os.getenv('HOROVOD_SIZE', -1))
# The mpi size does not match gloo size, we need to figure which one
# we are using to run the test.
is_mpi = gloo_size == -1
hvd.init()
size = hvd.size()
if is_mpi:
assert mpi_size == size
else:
assert gloo_size == size
    def test_horovod_allreduce(self):
        """Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
        hvd.init()
        size = hvd.size()
        # Restrict CPU types to what the active controller supports (oneCCL).
        dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                              torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            # Fixed seed: every rank draws the identical random tensor.
            torch.manual_seed(1234)
            tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            tensor = self.cast_and_place(tensor, dtype)
            summed = hvd.allreduce(tensor, average=False)
            # CPU FP16 tensors are promoted to FP32 so comparison ops work.
            tensor, summed = self.convert_cpu_fp16_to_fp32(tensor, summed)
            # All ranks contribute the same tensor, so sum == tensor * size.
            multiplied = tensor * size
            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                      torch.cuda.IntTensor, torch.cuda.LongTensor]:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                # No tolerance calibrated for >= 15 ranks: skip remaining combos.
                break
            # threshold is passed positionally as allclose's rtol.
            assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
    def test_horovod_allreduce_average(self):
        """Test that the allreduce correctly averages 1D, 2D, 3D tensors."""
        hvd.init()
        size = hvd.size()
        dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                              torch.FloatTensor, torch.DoubleTensor])
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            # Fixed seed: every rank contributes the identical tensor, so the
            # average of the reduction should equal the input itself.
            torch.manual_seed(1234)
            tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            tensor = self.cast_and_place(tensor, dtype)
            averaged = hvd.allreduce(tensor, average=True)
            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                      torch.cuda.IntTensor, torch.cuda.LongTensor]:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                # No tolerance calibrated for >= 15 ranks: skip remaining combos.
                break
            assert torch.allclose(averaged, tensor, threshold), 'hvd.allreduce produces incorrect results'
    def test_horovod_allreduce_inplace(self):
        """Test that the allreduce correctly sums 1D, 2D, 3D tensors."""
        hvd.init()
        size = hvd.size()
        dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                              torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            # Fixed seed: every rank draws the identical random tensor.
            torch.manual_seed(1234)
            tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            # Compute the expected result up front, before the in-place op
            # overwrites `tensor`.
            multiplied = self.cast_and_place(tensor * size, dtype)
            tensor = self.cast_and_place(tensor, dtype)
            # Trailing underscore: in-place variant writes the sum into `tensor`.
            hvd.allreduce_(tensor, average=False)
            tensor, multiplied = self.convert_cpu_fp16_to_fp32(tensor, multiplied)
            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                      torch.cuda.IntTensor, torch.cuda.LongTensor]:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                # No tolerance calibrated for >= 15 ranks: skip remaining combos.
                break
            assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
    def test_horovod_allreduce_async_fused(self):
        """Test that the allreduce correctly sums 1D, 2D, 3D tensors
        with Tensor Fusion."""
        hvd.init()
        size = hvd.size()
        dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                              torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        dims = [1, 2, 3]
        tests = []
        # Records whether any handle was still pending when polled, which is
        # evidence the ops actually ran asynchronously.
        is_hvd_poll_false_once = False
        for dtype, dim in itertools.product(dtypes, dims):
            torch.manual_seed(1234)
            tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            tensor = self.cast_and_place(tensor, dtype)
            # Submit all reductions before synchronizing any of them so
            # Horovod has the chance to fuse them into fewer transfers.
            handle = hvd.allreduce_async(tensor, average=False)
            if not hvd.poll(handle):
                is_hvd_poll_false_once = True
            tensor, = self.convert_cpu_fp16_to_fp32(tensor)
            multiplied = tensor * size
            tests.append((dtype, multiplied, handle))
        # Make sure it's an asynchronous operation.
        assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
        for dtype, multiplied, handle in tests:
            # Block until this reduction completes and fetch its output.
            summed = hvd.synchronize(handle)
            summed, = self.convert_cpu_fp16_to_fp32(summed)
            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                      torch.cuda.IntTensor, torch.cuda.LongTensor]:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                # No tolerance calibrated for >= 15 ranks: skip remaining checks.
                break
            assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_multi_gpu(self):
"""Test that the allreduce works on multiple GPUs."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
hvd.init()
local_rank = hvd.local_rank()
size = hvd.size()
# Skip the test if there are not enough GPUs.
if torch.cuda.device_count() < hvd.local_size() * 2:
self.skipTest("Not enough GPUs available")
iter = 0
dtypes = [torch.cuda.IntTensor, torch.cuda.LongTensor,
torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
torch.cuda.HalfTensor]
dims = [1, 2, 3]
for dtype, dim in itertools.product(dtypes, dims):
iter += 1
torch.manual_seed(1234)
tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
device = local_rank * 2 + (iter + local_rank) % 2
tensor = tensor.cuda(device).type(dtype)
multiplied = tensor * size
hvd.allreduce_(tensor, average=False)
# Threshold for floating point equality depends on number of
# ranks, since we're comparing against precise multiplication.
if size <= 3 or dtype in [torch.cuda.IntTensor, torch.cuda.LongTensor]:
threshold = 0
elif size < 10:
threshold = 1e-4
elif size < 15:
threshold = 5e-4
else:
break
assert torch.allclose(tensor, multiplied, threshold), 'hvd.allreduce produces incorrect results'
    def test_horovod_allreduce_prescale(self):
        """Test that the allreduce correctly sums 1D, 2D, 3D tensors with prescaling."""
        hvd.init()
        size = hvd.size()
        dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                              torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        int_types = [torch.IntTensor, torch.LongTensor,
                     torch.cuda.IntTensor, torch.cuda.LongTensor]
        half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            torch.manual_seed(1234)
            np.random.seed(1234)
            # Same seed on every rank, so all ranks use the same factor.
            factor = np.random.uniform()
            tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            tensor = self.cast_and_place(tensor, dtype)
            summed = hvd.allreduce(tensor, average=False,
                                   prescale_factor=factor)
            # Rebuild the expected result locally, mirroring the precision in
            # which Horovod applies the scale factor for each dtype/device.
            factor = torch.tensor(factor, dtype=torch.float64)
            factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
            if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
                # For integer types, scaling done in FP64
                factor = factor.type(torch.float64 if dtype in int_types else dtype)
                tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
            else:
                # For integer types, scaling done in FP64, FP32 math for FP16 on CPU
                factor = factor.type(torch.float32 if dtype in half_types else
                                     torch.float64 if dtype in int_types else dtype)
                tensor = tensor.type(torch.float32 if dtype in half_types else
                                     torch.float64 if dtype in int_types else dtype)
            multiplied = factor * tensor
            multiplied = multiplied.type(dtype)
            summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
            # All ranks contributed identical data: sum == size * scaled input.
            multiplied *= size
            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in int_types:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                # No tolerance calibrated for >= 15 ranks: skip remaining combos.
                break
            assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
    def test_horovod_allreduce_postscale(self):
        """Test that the allreduce correctly sums 1D, 2D, 3D tensors with postscaling."""
        hvd.init()
        size = hvd.size()
        dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                              torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        int_types = [torch.IntTensor, torch.LongTensor,
                     torch.cuda.IntTensor, torch.cuda.LongTensor]
        half_types = [torch.HalfTensor, torch.cuda.HalfTensor]
        dims = [1, 2, 3]
        for dtype, dim in itertools.product(dtypes, dims):
            torch.manual_seed(1234)
            np.random.seed(1234)
            # Same seed on every rank, so all ranks use the same factor.
            factor = np.random.uniform()
            tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            tensor = self.cast_and_place(tensor, dtype)
            summed = hvd.allreduce(tensor, average=False,
                                   postscale_factor=factor)
            # Rebuild the expected result locally, mirroring the precision in
            # which Horovod applies the scale factor for each dtype/device.
            factor = torch.tensor(factor, dtype=torch.float64)
            factor = factor.cuda(hvd.local_rank()) if dtype.is_cuda else factor
            if dtype.is_cuda and not int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
                # For integer types, scaling done in FP64
                factor = factor.type(torch.float64 if dtype in int_types else dtype)
                tensor = tensor.type(torch.float64 if dtype in int_types else dtype)
            else:
                # For integer types, scaling done in FP64, FP32 math for FP16 on CPU
                factor = factor.type(torch.float32 if dtype in half_types else
                                     torch.float64 if dtype in int_types else dtype)
                tensor = tensor.type(torch.float32 if dtype in half_types else
                                     torch.float64 if dtype in int_types else dtype)
            # Postscale: the factor is applied after summing across ranks.
            multiplied = size * tensor
            multiplied = multiplied * factor
            multiplied = multiplied.type(dtype)
            summed, multiplied = self.convert_cpu_fp16_to_fp32(summed, multiplied)
            # Threshold for floating point equality depends on number of
            # ranks, since we're comparing against precise multiplication.
            if size <= 3 or dtype in int_types:
                threshold = 0
            elif size < 10:
                threshold = 1e-4
            elif size < 15:
                threshold = 5e-4
            else:
                # No tolerance calibrated for >= 15 ranks: skip remaining combos.
                break
            assert torch.allclose(summed, multiplied, threshold), 'hvd.allreduce produces incorrect results'
def test_horovod_allreduce_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different rank or dimension."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
torch.manual_seed(1234)
dims = [17 + rank] * 3
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
# Same number of elements, different rank
torch.manual_seed(1234)
if rank == 0:
dims = [17, 23 * 57]
else:
dims = [17, 23, 57]
tensor = torch.FloatTensor(*dims).random_(-100, 100)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_type_error(self):
"""Test that the allreduce raises an error if different ranks try to
send tensors of different type."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.IntTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_cpu_gpu_error(self):
"""Test that the allreduce raises an error if different ranks try to
perform reduction on CPU and GPU."""
# Only do this test if there are GPUs available.
if not torch.cuda.is_available():
self.skipTest("No GPUs available")
if int(os.environ.get('HOROVOD_MIXED_INSTALL', 0)):
# Skip if compiled with CUDA but without HOROVOD_GPU_OPERATIONS.
self.skipTest("Not compiled with HOROVOD_GPU_OPERATIONS")
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
# Same rank, different dimension
dims = [17] * 3
if rank % 2 == 0:
tensor = torch.cuda.FloatTensor(*dims)
else:
tensor = torch.FloatTensor(*dims)
try:
hvd.allreduce(tensor)
assert False, 'hvd.allreduce did not throw error'
except (torch.FatalError, RuntimeError):
pass
def test_horovod_allreduce_duplicate_name_error(self):
"""Test that the allreduce raises an error if there are
two concurrent operations with the same name."""
hvd.init()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
dims = [17] * 3
tensor = torch.FloatTensor(*dims)
hvd.allreduce_async(tensor, name='duplicate_name')
try:
for i in range(10):
hvd.allreduce_async(tensor, name='duplicate_name')
assert False, 'hvd.allreduce_async did not throw error'
except (torch.FatalError, ValueError):
pass
def test_horovod_allreduce_grad(self):
    """Test the correctness of the allreduce gradient."""
    hvd.init()
    size = hvd.size()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Fixed seed keeps the random input reproducible across runs.
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        summed = hvd.allreduce(tensor, average=False)
        summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # For a sum-allreduce a gradient of all-ones flows back scaled by
        # the number of workers contributing to the sum.
        expected = np.ones([17] * dim) * size
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_allreduce_grad_average(self):
    """Test the correctness of the allreduce averaged gradient."""
    hvd.init()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Fixed seed keeps the random input reproducible across runs.
        torch.manual_seed(1234)
        tensor = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        summed = hvd.allreduce(tensor, average=True)
        summed.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # Averaging divides by the worker count, so a gradient of all-ones
        # comes back unscaled.
        expected = np.ones([17] * dim)
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce(self):
    """Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        # Five tensors per group; every rank generates the same values
        # thanks to the shared seed, so the reduced result is tensor * size.
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        summed = hvd.grouped_allreduce(tensors, average=False)
        tensors, summed = zip(*[self.convert_cpu_fp16_to_fp32(t, s) for t, s in zip(tensors, summed)])
        multiplied = [tensor * size for tensor in tensors]
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # NOTE(review): for size >= 15 the whole dtype/dim loop is
            # abandoned, not just this iteration — confirm intentional.
            break
        assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(summed, multiplied)]), \
            'hvd.grouped_allreduce produces incorrect results'
def test_horovod_grouped_allreduce_average(self):
    """Test that the grouped allreduce correctly averages 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        # Same seed on every rank -> identical inputs -> the average equals
        # the original tensors.
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        averaged = hvd.grouped_allreduce(tensors, average=True)
        tensors, averaged = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, averaged)])
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # NOTE(review): size >= 15 abandons the whole loop — confirm intentional.
            break
        assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(averaged, tensors)]), \
            'hvd.grouped_allreduce produces incorrect results for average'
def test_horovod_grouped_allreduce_inplace(self):
    """Test that the grouped allreduce correctly sums 1D, 2D, 3D tensors."""
    hvd.init()
    size = hvd.size()
    dtypes = self.filter_supported_types([torch.IntTensor, torch.LongTensor,
                                          torch.FloatTensor, torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        # Expected values are captured before the in-place op mutates `tensors`.
        multiplied = [self.cast_and_place(tensor * size, dtype) for tensor in tensors]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        # Trailing underscore: reduction writes results back into `tensors`.
        hvd.grouped_allreduce_(tensors, average=False)
        tensors, multiplied = zip(*[self.convert_cpu_fp16_to_fp32(t, m) for t, m in zip(tensors, multiplied)])
        # Threshold for floating point equality depends on number of
        # ranks, since we're comparing against precise multiplication.
        if size <= 3 or dtype in [torch.IntTensor, torch.LongTensor,
                                  torch.cuda.IntTensor, torch.cuda.LongTensor]:
            threshold = 0
        elif size < 10:
            threshold = 1e-4
        elif size < 15:
            threshold = 5e-4
        else:
            # NOTE(review): size >= 15 abandons the whole loop — confirm intentional.
            break
        assert all([torch.allclose(t1, t2, threshold) for t1, t2 in zip(tensors, multiplied)]), \
            'hvd.grouped_allreduce_ produces incorrect results'
def test_horovod_grouped_allreduce_cpu_gpu_error(self):
    """Test that the grouped allreduce raises an error if the input tensor
    list contains a mix of tensors on CPU and GPU."""
    # Only do this test if there are GPUs available.
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")
    hvd.init()
    # Alternate placement: even indices on GPU, odd indices on CPU.
    mixed = [torch.cuda.FloatTensor(10) if i % 2 == 0 else torch.FloatTensor(10)
             for i in range(5)]
    try:
        hvd.grouped_allreduce(mixed, average=False)
        assert False, 'hvd.allreduce did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_grouped_allreduce_grad(self):
    """Test the correctness of the grouped allreduce gradient."""
    hvd.init()
    size = hvd.size()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        for tensor in tensors:
            tensor.requires_grad_()
        summed = hvd.grouped_allreduce(tensors, average=False)
        for s in summed:
            s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
        # Sum-allreduce scales an all-ones upstream gradient by the worker count.
        expected = np.ones([17] * dim) * size
        for grad_out in grads_out:
            err = np.linalg.norm(expected - grad_out)
            self.assertLess(err, 0.00000001,
                            "gradient %s differs from expected %s, "
                            "error: %s" % (grad_out, expected, str(err)))
def test_horovod_grouped_allreduce_grad_average(self):
    """Test the correctness of the grouped allreduce averaged gradient.

    Renamed from ``test_horovod_allreduce_grad_average``: that name duplicated
    an earlier test method in this class, so this definition silently shadowed
    it and the earlier test never ran. The body exercises
    ``hvd.grouped_allreduce``, hence the new name.
    """
    hvd.init()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        torch.manual_seed(1234)
        tensors = [torch.FloatTensor(*([17] * dim)).random_(-100, 100) for _ in range(5)]
        tensors = [self.cast_and_place(tensor, dtype) for tensor in tensors]
        for tensor in tensors:
            tensor.requires_grad_()
        summed = hvd.grouped_allreduce(tensors, average=True)
        for s in summed:
            s.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grads_out = [tensor.grad.data.cpu().numpy() for tensor in tensors]
        # Averaging divides by the worker count, so an all-ones upstream
        # gradient comes back unscaled.
        expected = np.ones([17] * dim)
        for grad_out in grads_out:
            err = np.linalg.norm(expected - grad_out)
            self.assertLess(err, 0.00000001,
                            "gradient %s differs from expected %s, "
                            "error: %s" % (grad_out, expected, str(err)))
def test_horovod_allgather(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Each rank contributes a [17]*dim tensor filled with its own rank id.
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        gathered = hvd.allgather(tensor)
        tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
        # Gathering concatenates along the first dimension only.
        assert list(gathered.shape) == [17 * size] + [17] * (dim - 1)
        for i in range(size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == [17] * dim, \
                'hvd.allgather produces incorrect gathered shape'
            # Slice i must contain exactly rank i's constant fill value.
            assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_variable_size(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors,
    even if those tensors have different sizes along the first dim."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break
        # Per-rank first-dimension lengths; trailing dims stay at 17.
        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensor_sizes = tensor_sizes[:size]
        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        gathered = hvd.allgather(tensor)
        tensor, gathered = self.convert_cpu_fp16_to_fp32(tensor, gathered)
        expected_size = sum(tensor_sizes)
        assert list(gathered.shape) == [expected_size] + [17] * (dim - 1)
        for i in range(size):
            rank_size = [tensor_sizes[i]] + [17] * (dim - 1)
            # Rank i's slice starts after all preceding ranks' contributions.
            rank_tensor = gathered[sum(
                tensor_sizes[:i]):sum(tensor_sizes[:i + 1])]
            assert list(rank_tensor.shape) == rank_size
            assert rank_tensor.data.min() == i
            assert rank_tensor.data.max() == i
def test_horovod_allgather_async_fused(self):
    """Test that the allgather correctly gathers 1D, 2D, 3D tensors
    with Tensor Fusion."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    tests = []
    # Tracks whether at least one handle was still pending when polled,
    # i.e. the operations really ran asynchronously.
    is_hvd_poll_false_once = False
    for dtype, dim in itertools.product(dtypes, dims):
        rank_shape = [17] * dim
        tensor = torch.FloatTensor(*(rank_shape)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        handle = hvd.allgather_async(tensor)
        if not hvd.poll(handle):
            is_hvd_poll_false_once = True
        tests.append((handle, rank_shape))
    # Make sure it's an asynchronous operation.
    assert is_hvd_poll_false_once, 'hvd.poll() always returns True, not an async op?'
    for handle, rank_shape in tests:
        # synchronize() blocks until the fused operation completes.
        gathered = hvd.synchronize(handle)
        gathered, = self.convert_cpu_fp16_to_fp32(gathered)
        for i in range(size):
            rank_tensor = gathered[i * 17:(i + 1) * 17]
            assert list(rank_tensor.shape) == rank_shape, \
                'hvd.allgather produces incorrect gathered shape'
            assert rank_tensor.data.min() == i, 'hvd.allgather produces incorrect gathered tensor'
            assert rank_tensor.data.max() == i, 'hvd.allgather produces incorrect gathered tensor'
def test_horovod_allgather_error(self):
    """Test that the allgather returns an error if any dimension besides
    the first is different among the tensors being gathered."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A shape mismatch needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    # The second dimension varies per rank, which allgather must reject.
    bad_shape = [17, 10 * (rank + 1), 17]
    bad = torch.FloatTensor(*bad_shape).fill_(1).mul_(rank)
    try:
        hvd.allgather(bad)
        assert False, 'hvd.allgather did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_allgather_type_error(self):
    """Test that the allgather returns an error if the types being gathered
    differ among the processes"""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A type mismatch needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    shape = [17] * 3
    # Even ranks contribute int tensors, odd ranks float tensors.
    mismatched = torch.IntTensor(*shape) if rank % 2 == 0 else torch.FloatTensor(*shape)
    try:
        hvd.allgather(mismatched)
        assert False, 'hvd.allgather did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_allgather_duplicate_name_error(self):
    """Test that the allgather raises an error if there are
    two concurrent operations with the same name."""
    hvd.init()
    size = hvd.size()
    # Name collisions are only detectable with at least two workers.
    if size == 1:
        self.skipTest("Only one worker available")
    buf = torch.FloatTensor(*([17] * 3))
    # First submission registers the name; re-use while outstanding must fail.
    hvd.allgather_async(buf, name='duplicate_name')
    try:
        for _ in range(10):
            hvd.allgather_async(buf, name='duplicate_name')
        assert False, 'hvd.allgather_async did not throw error'
    except (torch.FatalError, ValueError):
        pass
def test_horovod_allgather_grad(self):
    """Test the correctness of the allgather gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        # Support tests up to MPI Size of 35
        if size > 35:
            break
        tensor_sizes = [3, 2, 7, 4, 6, 8, 10] * 5
        tensor_sizes = tensor_sizes[:size]
        tensor = torch.FloatTensor(
            *([tensor_sizes[rank]] + [17] * (dim - 1))).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        grad_list = []
        # BUGFIX: the loop variable used to be named `size`, clobbering the
        # world size read above; every later dtype/dim iteration then used a
        # corrupted value for the `size > 35` guard and `tensor_sizes[:size]`.
        for r, rank_size in enumerate(tensor_sizes):
            grad_list.append(self.cast_and_place(
                torch.ones([rank_size] + [17] * (dim - 1)), dtype) * r)
        grad_ys = torch.cat(grad_list, dim=0)
        gathered = hvd.allgather(tensor)
        gathered.backward(grad_ys)
        grad_out = tensor.grad.data.cpu().numpy()
        # Each rank gets back the gradient slice matching its contribution,
        # which we scaled above by the contributing rank index.
        expected = np.ones(
            [tensor_sizes[rank]] + [17] * (dim - 1)
        ) * rank
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_broadcast(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    # Exercise every possible root rank, not just rank 0.
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        # Local tensor is filled with our rank; the root's tensor with its rank.
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = self.cast_and_place(tensor, dtype)
        root_tensor = self.cast_and_place(root_tensor, dtype)
        broadcasted_tensor = hvd.broadcast(tensor, root_rank)
        tensor, root_tensor, broadcasted_tensor = \
            self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
        if rank != root_rank:
            # The out-of-place variant must leave the input untouched.
            assert (tensor == root_tensor).max() == 0, \
                'hvd.broadcast modifies source tensor'
        assert (broadcasted_tensor.data == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_inplace(self):
    """Test that the broadcast correctly broadcasts 1D, 2D, 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    dtypes = [torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
              torch.IntTensor, torch.LongTensor, torch.FloatTensor, torch.DoubleTensor,
              torch.HalfTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        root_tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(root_rank)
        tensor = self.cast_and_place(tensor, dtype)
        root_tensor = self.cast_and_place(root_tensor, dtype)
        # Trailing underscore: broadcast writes into `tensor` in place.
        broadcasted_tensor = hvd.broadcast_(tensor, root_rank)
        tensor, root_tensor, broadcasted_tensor = \
            self.convert_cpu_fp16_to_fp32(tensor, root_tensor, broadcasted_tensor)
        # In-place variant: input and return value must now both match the root.
        assert (tensor == broadcasted_tensor).min() == 1, \
            'hvd.broadcast does not modify source tensor'
        assert (broadcasted_tensor == root_tensor).min() == 1, \
            'hvd.broadcast produces incorrect broadcasted tensor'
def test_horovod_broadcast_error(self):
    """Test that the broadcast returns an error if any dimension besides
    the first is different among the tensors being broadcasted."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A shape mismatch needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    # The second dimension varies per rank, which broadcast must reject.
    bad_shape = [17, 10 * (rank + 1), 17]
    bad = torch.FloatTensor(*bad_shape).fill_(1).mul_(rank)
    try:
        hvd.broadcast(bad, 0)
        assert False, 'hvd.broadcast did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_broadcast_type_error(self):
    """Test that the broadcast returns an error if the types being broadcasted
    differ among the processes"""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A type mismatch needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    shape = [17] * 3
    # Even ranks use int tensors, odd ranks float tensors.
    mismatched = torch.IntTensor(*shape) if rank % 2 == 0 else torch.FloatTensor(*shape)
    try:
        hvd.broadcast(mismatched, 0)
        assert False, 'hvd.broadcast did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_broadcast_rank_error(self):
    """Test that the broadcast returns an error if different ranks
    specify different root rank."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # Disagreement about the root needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    payload = torch.FloatTensor(*([17] * 3)).fill_(1)
    try:
        # Every rank names itself as root, so the ranks disagree.
        hvd.broadcast(payload, rank)
        assert False, 'hvd.broadcast did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_broadcast_duplicate_name_error(self):
    """Test that the broadcast raises an error if there are
    two concurrent operations with the same name."""
    hvd.init()
    size = hvd.size()
    # Name collisions are only detectable with at least two workers.
    if size == 1:
        self.skipTest("Only one worker available")
    buf = torch.FloatTensor(*([17] * 3))
    # First submission registers the name; re-use while outstanding must fail.
    hvd.broadcast_async(buf, root_rank=0, name='duplicate_name')
    try:
        for _ in range(10):
            hvd.broadcast_async(buf, root_rank=0, name='duplicate_name')
        assert False, 'hvd.broadcast_async did not throw error'
    except (torch.FatalError, ValueError):
        pass
def test_horovod_broadcast_grad(self):
    """Test the correctness of the broadcast gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if there is only one worker.
    if size == 1:
        self.skipTest("Only one worker available")
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    root_ranks = list(range(size))
    for dtype, dim, root_rank in itertools.product(dtypes, dims, root_ranks):
        tensor = torch.FloatTensor(*([17] * dim)).fill_(1).mul_(rank)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        broadcasted_tensor = hvd.broadcast(tensor, root_rank)
        broadcasted_tensor.backward(self.cast_and_place(torch.ones([17] * dim), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # Only the root's input influences the output, so only the root
        # receives a non-zero gradient.
        c = 1 if rank == root_rank else 0
        expected = np.ones([17] * dim) * c
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall(self):
    """Test that the alltoall correctly distributes 1D, 2D, and 3D tensors."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                                          torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                                          torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        # rank+1 copies of each destination rank id: after the exchange every
        # worker should hold only its own rank id.
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = torch.Tensor(vals)
        # Grow to `dim` dimensions by doubling along a new axis.
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)
        # Explicit splits: rank r sends r+1 rows to every destination.
        splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
        tensor = self.cast_and_place(tensor, dtype)
        collected, received_splits = hvd.alltoall(tensor, splits)
        tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
        assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
        self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],
                                 "hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_equal_split(self):
    """Test that the alltoall correctly distributes 1D tensors with default splitting."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                                          torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                                          torch.DoubleTensor, torch.HalfTensor])
    if torch.cuda.is_available():
        dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
                   torch.cuda.IntTensor, torch.cuda.LongTensor,
                   torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                   torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = torch.Tensor(vals)
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)
        tensor = self.cast_and_place(tensor, dtype)
        # No splits argument: the first dimension is divided evenly.
        collected = hvd.alltoall(tensor)
        tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
        assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
def test_horovod_alltoall_splits_on_gpu(self):
    """Test that the alltoall works correctly when the splits argument is a tensor on GPU."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    if not torch.cuda.is_available():
        self.skipTest("No GPUs available")
    # This test does not apply if NCCL version < 2.7.0.
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    dtypes = self.filter_supported_types([torch.ByteTensor, torch.CharTensor, torch.ShortTensor,
                                          torch.IntTensor, torch.LongTensor, torch.FloatTensor,
                                          torch.DoubleTensor, torch.HalfTensor])
    dtypes += [torch.cuda.ByteTensor, torch.cuda.CharTensor, torch.cuda.ShortTensor,
               torch.cuda.IntTensor, torch.cuda.LongTensor,
               torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
               torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = torch.Tensor(vals)
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)
        # Splits live on the GPU; received_splits should come back on GPU too.
        splits = torch.tensor([rank + 1] * size, dtype=torch.int32, device="cuda")
        tensor = self.cast_and_place(tensor, dtype)
        collected, received_splits = hvd.alltoall(tensor, splits)
        tensor, collected = self.convert_cpu_fp16_to_fp32(tensor, collected)
        assert collected.data.min() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.data.max() == rank, 'hvd.alltoall produces incorrect collected tensor'
        assert collected.numel() == size * (size + 1) // 2 * 2**(dim - 1), 'hvd.alltoall collected wrong number of values'
        self.assertEqual(received_splits.device.type, "cuda", "received_splits should be on GPU here")
        self.assertSequenceEqual(received_splits.tolist(), [rk + 1 for rk in range(size)],
                                 "hvd.alltoall returned incorrect received_splits")
def test_horovod_alltoall_type_error(self):
    """Test that the alltoall returns an error if the tensor types differ
    across the processes."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A type mismatch needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # Odd ranks send int32, even ranks float32.
    elem_type = torch.int32 if rank % 2 else torch.float32
    mismatched = torch.empty(size, dtype=elem_type)
    try:
        hvd.alltoall(mismatched)
        assert False, 'hvd.alltoall did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_alltoall_equal_split_length_error(self):
    """Test that the alltoall with default splitting returns an error if the tensor length is not a multiple
    of the number of workers."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # Equal splitting over one worker always succeeds, so skip.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # One element too many: length size + 1 cannot be split evenly.
    oversized = torch.empty(size + 1)
    try:
        hvd.alltoall(oversized)
        assert False, 'hvd.alltoall did not throw error'
    except (torch.FatalError, ValueError):
        pass
def test_horovod_alltoall_splits_error(self):
    """Test that the alltoall returns an error if the sum of the splits entries exceeds
    the first dimension of the input tensor."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # Splits request `size` rows in total but the tensor only has size - 1.
    short_tensor = torch.empty(size - 1)
    row_splits = torch.ones(size, dtype=torch.int32)
    try:
        hvd.alltoall(short_tensor, row_splits)
        assert False, 'hvd.alltoall did not throw error'
    except (torch.FatalError, ValueError):
        pass
def test_horovod_alltoall_splits_type_error(self):
    """Test that the alltoall returns an error if the splits tensor does not
    contain 32-bit integers."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    payload = torch.empty(size)
    # Float splits are invalid: alltoall requires int32 split counts.
    bad_splits = torch.empty(size, dtype=torch.float32)
    try:
        hvd.alltoall(payload, bad_splits)
        assert False, 'hvd.alltoall did not throw error'
    except (torch.FatalError, ValueError):
        pass
def test_horovod_alltoall_rank_error(self):
    """Test that the alltoall returns an error if any dimension besides
    the first is different among the tensors being processed."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # A shape mismatch needs at least two workers to manifest.
    if size == 1:
        self.skipTest("Only one worker available")
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # The second dimension varies per rank, which alltoall must reject.
    bad_shape = [2 * size, 10 * (rank + 1), 2 * size]
    try:
        hvd.alltoall(torch.ones(bad_shape))
        assert False, 'hvd.alltoall did not throw error'
    except (torch.FatalError, RuntimeError):
        pass
def test_horovod_alltoall_grad(self):
    """Test the correctness of the alltoall gradient."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = torch.Tensor(vals)
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        splits = torch.tensor([rank + 1] * size, dtype=torch.int32)
        collected, received_splits = hvd.alltoall(tensor, splits)
        collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # Alltoall is a permutation of elements, so an all-ones upstream
        # gradient flows back unchanged.
        expected = np.ones(tensor.shape)
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_horovod_alltoall_equal_split_grad(self):
    """Test the correctness of the alltoall gradient with default splitting."""
    hvd.init()
    rank = hvd.rank()
    size = hvd.size()
    # This test does not apply if NCCL version < 2.7.0
    if hvd.nccl_built() and hvd.nccl_built() < 2700:
        self.skipTest("NCCL-based Alltoall requires NCCL version >= 2.7.0.")
    # Only Tensors of floating point dtype can require gradients
    dtypes = [torch.FloatTensor, torch.DoubleTensor]
    if torch.cuda.is_available():
        dtypes += [torch.cuda.FloatTensor, torch.cuda.DoubleTensor, torch.cuda.HalfTensor]
    dims = [1, 2, 3]
    for dtype, dim in itertools.product(dtypes, dims):
        vals = []
        for i in range(size):
            vals += [i] * (rank + 1)
        tensor = torch.Tensor(vals)
        for _ in range(dim - 1):
            tensor = tensor.unsqueeze(1)
            tensor = torch.cat((tensor, tensor), dim=1)
        tensor = self.cast_and_place(tensor, dtype)
        tensor.requires_grad_()
        # No splits argument: the first dimension is divided evenly.
        collected = hvd.alltoall(tensor)
        collected.backward(self.cast_and_place(torch.ones(collected.shape), dtype))
        grad_out = tensor.grad.data.cpu().numpy()
        # Alltoall is a permutation of elements, so an all-ones upstream
        # gradient flows back unchanged.
        expected = np.ones(tensor.shape)
        err = np.linalg.norm(expected - grad_out)
        self.assertLess(err, 0.00000001,
                        "gradient %s differs from expected %s, "
                        "error: %s" % (grad_out, expected, str(err)))
def test_broadcast_state(self):
hvd.init()
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
def create_model(opt_class, opt_params):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
optimizer = new_optimizer(opt_class, opt_params, model)
optimizer = hvd.DistributedOptimizer(
optimizer, named_parameters=model.named_parameters())
return model, optimizer
def get_model_param_values(model):
params = sorted(model.state_dict().items())
return [(k, v.clone()) for k, v in params]
def get_optimizer_param_values(optimizer):
results = []
state_dict = optimizer.state_dict()
for group in state_dict['param_groups']:
for param_id in group['params']:
if param_id not in state_dict['state']:
continue
params = sorted(state_dict['state'][param_id].items())
for k, v in params:
results.append(
(k, v.clone() if torch.is_tensor(v) else v))
return results
# L-BFGS is currently unsupported, as are sparse tensors, which are
# required by SparseAdam optimizer
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
opt_params_list = [
dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
dict(lr=0.2)
]
for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
model, optimizer = create_model(opt_class, opt_params)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
model_param_values = get_model_param_values(model)
for name, model_param_value in model_param_values:
hvd.broadcast_(model_param_value, root_rank=0)
opt_param_values_updated = []
opt_param_values = get_optimizer_param_values(optimizer)
for name, opt_param_value in opt_param_values:
is_tensor = torch.is_tensor(opt_param_value)
if is_tensor:
hvd.broadcast_(opt_param_value, root_rank=0)
else:
opt_param_value = hvd.broadcast_object(opt_param_value, name=name)
opt_param_values_updated.append((name, opt_param_value))
opt_param_values = opt_param_values_updated
with temppath() as fname:
if hvd.rank() == 0:
state = {
'model': model.state_dict(),
'optimizer': optimizer.state_dict(),
}
torch.save(state, fname)
model, optimizer = create_model(opt_class, opt_params)
if hvd.rank() == 0:
checkpoint = torch.load(fname)
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
model_param_value_after = get_model_param_values(model)
for before, after in zip(model_param_values,
model_param_value_after):
name, model_param_value = before
name_after, model_param_value_after = after
self.assertEqual(name, name_after)
self.assertEqual(type(model_param_value),
type(model_param_value_after))
self.assertTrue(
(model_param_value == model_param_value_after).all())
expected_tensors = hvd.broadcast_object(len(optimizer.state_dict()['state'].values()))
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
self.assertEqual(len(optimizer.state_dict()['state'].values()), expected_tensors)
opt_param_values_after = get_optimizer_param_values(optimizer)
for before, after in zip(opt_param_values, opt_param_values_after):
name, opt_param_value = before
name_after, opt_param_value_after = after
self.assertEqual(name, name_after)
self.assertEqual(type(opt_param_value),
type(opt_param_value_after))
if torch.is_tensor(opt_param_value):
self.assertTrue(
(opt_param_value == opt_param_value_after).all())
else:
self.assertEqual(opt_param_value, opt_param_value_after)
    # TODO: investigate why this hangs on K80s
    @unittest.skip
    def test_broadcast_state_gpu(self):
        """Run test_broadcast_state with the default tensor type set to CUDA."""
        # Only do this test if there are GPUs available.
        if not torch.cuda.is_available():
            self.skipTest("No GPUs available")
        # Set default tensor type, ensuring optimizer tensor-wrapping is robust
        # to this setting.
        try:
            torch.set_default_tensor_type(torch.cuda.FloatTensor)
            self.test_broadcast_state()
        finally:
            # Always restore the process-wide default so later tests are unaffected.
            torch.set_default_tensor_type(torch.FloatTensor)
def test_broadcast_state_options(self):
hvd.init()
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
params_0 = dict(lr=0.1, momentum=0.8, weight_decay=0.2, nesterov=True,
betas=(0.9, 0.999), etas=(0.8, 2.4), step_sizes=(1e-5, 100))
params_1 = dict(lr=0.2, momentum=0.9, weight_decay=0.1, nesterov=False,
betas=(0.8, 0.9), etas=(0.25, 1.75), step_sizes=(1e-7, 5))
def create_model(opt_class):
model = torch.nn.Sequential(
torch.nn.Linear(D_in, H),
torch.nn.ReLU(),
torch.nn.Linear(H, D_out),
)
params = params_0 if hvd.rank() == 0 else params_1
p = {
k: v for k, v in params.items()
if k in inspect.getargspec(opt_class.__init__).args
}
opt = opt_class(model.parameters(), **p)
opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
return model, opt
# Include subclass name so we can sort them lexicographically, otherwise different
# ranks will have different optimizer orderings
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
for _, opt_class in optimizers:
model, optimizer = create_model(opt_class)
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
p0 = {
k: v for k, v in params_0.items()
if k in inspect.getargspec(opt_class.__init__).args
}
for k, p in p0.items():
p_actual = optimizer.param_groups[0][k]
if not isinstance(p, Iterable):
p_actual = [p_actual]
p = [p]
for i in range(len(p)):
self.assertEqual(type(p_actual[i]), type(p[i]))
self.assertAlmostEqual(p_actual[i], p[i], delta=1e-5)
# Ensure that the parameter option types are compatible with ops
y_pred = model(x)
loss = F.mse_loss(y_pred, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
    def test_broadcast_state_no_grad(self):
        """Test that broadcasting state works for parameters with requires_grad=False."""
        class ModelNoGrad(nn.Module):
            def __init__(self, a, b):
                super(ModelNoGrad, self).__init__()
                # 'a' is a frozen (non-trainable) integer parameter; only 'b' trains.
                self.a = nn.Parameter(a.int(), requires_grad=False)
                self.b = nn.Parameter(b)
            def forward(self, x):
                return torch.index_select(self.b, 0, self.a.long()) * x
        hvd.init()
        a = torch.Tensor([1, 3])
        b = torch.rand(4)
        model = ModelNoGrad(a, b)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.001, weight_decay=1e-6, momentum=0.9, nesterov=True)
        optimizer = hvd.DistributedOptimizer(optimizer, named_parameters=model.named_parameters())
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        hvd.broadcast_optimizer_state(optimizer, root_rank=0)
        # params[0] is the frozen 'a' (grad must stay None); params[1] is 'b'.
        grad = optimizer.param_groups[0]['params'][1].grad
        bgrad = hvd.broadcast(grad, root_rank=0)
        assert optimizer.param_groups[0]['params'][0].grad is None
        assert torch.all(torch.eq(grad, bgrad)).item()
def test_broadcast_object(self):
hvd.init()
expected_obj = {
'hello': 123,
0: [1, 2]
}
obj = expected_obj if hvd.rank() == 0 else {}
obj = hvd.broadcast_object(obj, root_rank=0)
self.assertDictEqual(obj, expected_obj)
def test_allgather_object(self):
hvd.init()
d = {'metric_val_1': hvd.rank()}
if hvd.rank() == 1:
d['metric_val_2'] = 42
results = hvd.allgather_object(d)
expected = [{'metric_val_1': i} for i in range(hvd.size())]
if hvd.size() > 1:
expected[1] = {'metric_val_1': 1, 'metric_val_2': 42}
self.assertEqual(len(results), hvd.size())
self.assertListEqual(results, expected)
    def test_compression_fp16(self):
        """Test fp16 compression: floats compress to float16 and round-trip;
        integer dtypes pass through unchanged."""
        valid_dtypes = [torch.float32, torch.float64]
        invalid_dtypes = [torch.uint8, torch.int8, torch.int16,
                          torch.int32, torch.int64]
        tensor_size = [5] * 3
        compression = hvd.Compression.fp16
        for dtype in valid_dtypes:
            tensor = torch.ones(tensor_size, dtype=dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            self.assertEqual(tensor_compressed.dtype, torch.float16)
            tensor_decompressed = compression.decompress(tensor_compressed, ctx)
            self.assertEqual(tensor_decompressed.dtype, dtype)
            expected = np.ones(tensor_size)
            err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
            self.assertLess(err, 0.00000001)
        for dtype in invalid_dtypes:
            # Non-float dtypes are not compressed: dtype must be preserved.
            tensor = torch.ones(tensor_size, dtype=dtype)
            tensor_compressed, ctx = compression.compress(tensor)
            self.assertEqual(tensor_compressed.dtype, dtype)
            tensor_decompressed = compression.decompress(tensor_compressed, ctx)
            self.assertEqual(tensor_decompressed.dtype, dtype)
            if dtype != torch.int8: # Cannot cast to NumPy with a CharTensor
                expected = np.ones(tensor_size)
                err = np.linalg.norm(expected - tensor_decompressed.data.numpy())
                self.assertLess(err, 0.00000001)
def test_force_allreduce(self):
"""Test that allreduce is forced on all gradients during opt.step()."""
hvd.init()
rank = hvd.rank()
size = hvd.size()
# This test does not apply if there is only one worker.
if size == 1:
self.skipTest("Only one worker available")
N, D_in, H, D_out = 64, 100, 10, 10
x = torch.randn(N, D_in).requires_grad_()
y = torch.randn(N, D_out).requires_grad_()
def new_optimizer(cls, opt_params, model):
p = {
k: v for k, v in opt_params.items()
if k in inspect.getargspec(cls.__init__).args
}
return cls(model.parameters(), **p)
class Net(torch.nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = torch.nn.Linear(D_in, H)
self.fc2 = torch.nn.Linear(H, D_out)
self.fc3 = torch.nn.Linear(D_out, D_out)
def forward(self, x_):
x_ = F.relu(self.fc1(x_))
x1_ = self.fc2(x_)
x2_ = self.fc3(F.relu(x1_))
return x1_, x2_
def create_model(opt_class, opt_params):
model = Net()
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
opt = new_optimizer(opt_class, opt_params, model)
opt = hvd.DistributedOptimizer(
opt, named_parameters=model.named_parameters())
return model, opt
# L-BFGS is currently unsupported, as are sparse tensors, which are
# required by SparseAdam optimizer
optimizers = [
(subclass.__name__, subclass)
for subclass in torch.optim.Optimizer.__subclasses__()
if subclass.__module__.startswith('torch.optim') and
subclass != torch.optim.LBFGS and
subclass != torch.optim.SparseAdam
]
optimizers.sort(key=lambda tup: tup[0])
opt_params_list = [
dict(lr=0.2, momentum=0.9, weight_decay=0.1, centered=True),
dict(lr=0.2)
]
for (opt_name, opt_class), opt_params in itertools.product(optimizers, opt_params_list):
model, optimizer = create_model(opt_class, opt_params)
y_pred1, y_pred2 = model(x)
if rank == 0:
loss = F.mse_loss(y_pred1, y, size_average=False)
else:
loss = F.mse_loss(y_pred2, y, size_average=False)
optimizer.zero_grad()
loss.backward()
optimizer.step()
    def test_model_parallelism(self):
        """Test that tensors on different GPUs are supported."""
        # Only do this test if there are GPUs available.
        if not torch.cuda.is_available():
            self.skipTest("No GPUs available")
        hvd.init()
        local_rank = hvd.local_rank()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        # Skip the test if there are not enough GPUs.
        if torch.cuda.device_count() < hvd.local_size() * 2:
            self.skipTest("Not enough GPUs available")
        # Each local rank owns two consecutive GPUs for model parallelism.
        first_device = local_rank * 2
        second_device = local_rank * 2 + 1
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                # Place parts of model on different GPUs.
                self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(first_device)
                self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(second_device)
            def forward(self, x):
                # Move activations between devices to match each layer's placement.
                x = x.cuda(first_device)
                x = self.conv1(x)
                x = x.cuda(second_device)
                x = self.conv2(x)
                return x
        model = Net()
        inp = torch.rand([1, 1, 1000, 1000])
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters())
        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
    def test_delta_optimizer(self):
        """Test a DistributedOptimizer using the Adasum (delta) reduction op."""
        hvd.init()
        # TODO support non-MPI Adasum operation
        # Only do this test if there are GPUs available.
        if not hvd.mpi_enabled() or not torch.cuda.is_available():
            self.skipTest("No GPUs available")
        local_rank = hvd.local_rank()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 100, 1).cuda(local_rank)
                self.conv2 = torch.nn.Conv2d(100, 1, 1).cuda(local_rank)
            def forward(self, x):
                x = x.cuda(local_rank)
                x = self.conv1(x)
                x = x.cuda(local_rank)
                x = self.conv2(x)
                return x
        model = Net()
        inp = torch.rand([1, 1, 1000, 1000])
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        # op=hvd.Adasum selects the Adasum gradient-combination algorithm.
        opt = hvd.DistributedOptimizer(opt, named_parameters=model.named_parameters(), op=hvd.Adasum)
        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
def test_duplicate_names(self):
"""Test that passing duplicate names to optimizer will fail."""
net1 = torch.nn.Conv2d(1, 1, 1)
net2 = torch.nn.Conv2d(1, 1, 1)
parameters = itertools.chain(net1.parameters(), net2.parameters())
opt = torch.optim.SGD(parameters, lr=0.1)
# This will have duplicate names, since both net1 and net2 have 'weight' and 'bias'
named_parameters = itertools.chain(net1.named_parameters(), net2.named_parameters())
try:
hvd.DistributedOptimizer(opt, named_parameters=named_parameters)
assert False, 'hvd.DistributedOptimizer did not throw error'
except ValueError:
pass
    def test_dynamic_requires_grad(self):
        """Test that makes sure that gradients can be turned off/on dynamically."""
        hvd.init()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        # GAN-style setup: a generator and a discriminator, each with its own
        # distributed optimizer, trained in alternating phases.
        gen = torch.nn.Conv2d(1, 10, 1)
        disc = torch.nn.Conv2d(10, 1, 1)
        inp = torch.rand([1, 1, 100, 100])
        gen_opt = torch.optim.SGD(gen.parameters(), lr=0.1)
        gen_opt = hvd.DistributedOptimizer(gen_opt, named_parameters=gen.named_parameters())
        disc_opt = torch.optim.SGD(disc.parameters(), lr=0.1)
        disc_opt = hvd.DistributedOptimizer(disc_opt, named_parameters=disc.named_parameters())
        def train_step(train_generator=False, train_discriminator=False):
            # Toggle requires_grad per phase, then verify gradients exist
            # exactly for the sub-network being trained.
            for p in gen.parameters():
                p.requires_grad_(train_generator)
            for p in disc.parameters():
                p.requires_grad_(train_discriminator)
            gen_opt.zero_grad()
            disc_opt.zero_grad()
            loss = disc(gen(inp)).sum()
            loss.backward()
            for p in gen.parameters():
                assert train_generator == (p.grad is not None and p.grad.max().is_nonzero()), \
                    'Gradient for generator is zero but it should be trained or vice versa.'
            for p in disc.parameters():
                assert train_discriminator == (p.grad is not None and p.grad.max().is_nonzero()), \
                    'Gradient for discriminator is zero but it should be trained or vice versa.'
            if train_generator:
                gen_opt.step()
            if train_discriminator:
                disc_opt.step()
        for x in range(10):
            # Step 1: train generator.
            train_step(train_generator=True)
            # Step 2: train discriminator.
            train_step(train_discriminator=True)
    def test_gradient_clipping(self):
        """Test gradient clipping example."""
        hvd.init()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        x = torch.ones(1, 1).requires_grad_()
        y = torch.ones(1, 1).requires_grad_()
        model = torch.nn.Linear(1, 1)
        # Fixed initial weights so the pre-clip gradient is deterministic.
        model.weight = torch.nn.Parameter(torch.zeros(1, 1) + 0.5)
        model.bias = torch.nn.Parameter(torch.zeros(1))
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        optimizer = hvd.DistributedOptimizer(
            optimizer, named_parameters=model.named_parameters())
        y_pred = model(x)
        loss = F.mse_loss(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        # synchronize() completes the allreduce so gradients can be clipped
        # before the weight update.
        optimizer.synchronize()
        prior_grad = model.weight.grad.item()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        clipped_grad = model.weight.grad.item()
        assert abs(prior_grad) > abs(clipped_grad)
        # skip_synchronize() avoids a second (duplicate) synchronization in step().
        with optimizer.skip_synchronize():
            optimizer.step()
    def test_synchronize_step_warning(self):
        """
        Test that .synchronize() followed by .step() without
        optimizer.skip_synchronize() context will produce a warning.
        """
        hvd.init()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        x = torch.zeros(1, 1).requires_grad_()
        y = torch.ones(1, 1).requires_grad_()
        model = torch.nn.Linear(1, 1)
        hvd.broadcast_parameters(model.state_dict(), root_rank=0)
        optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
        optimizer = hvd.DistributedOptimizer(
            optimizer, named_parameters=model.named_parameters())
        y_pred = model(x)
        loss = F.mse_loss(y_pred, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.synchronize()
        torch.nn.utils.clip_grad_norm_(model.parameters(), 0.1)
        # step() without skip_synchronize() after a manual synchronize()
        # should emit exactly one warning.
        with warnings.catch_warnings(record=True) as ws:
            optimizer.step()
            assert len(ws) == 1
            assert 'optimizer.step() called without optimizer.skip_synchronize()' \
                in str(ws[0].message)
    def test_no_named_parameters(self):
        """Test that leaving the default named_parameters=None will not throw an error."""
        hvd.init()
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 100, 1)
                self.conv2 = torch.nn.Conv2d(100, 1, 1)
            def forward(self, x):
                x = self.conv1(x)
                x = self.conv2(x)
                return x
        model = Net()
        inp = torch.rand([1, 1, 1000, 1000])
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        # No named_parameters argument: Horovod must generate names itself.
        opt = hvd.DistributedOptimizer(opt)
        loss = model(inp).sum()
        opt.zero_grad()
        loss.backward()
        opt.step()
    def test_missing_named_parameters(self):
        """Test that naming half of the model parameters will throw an error."""
        hvd.init()
        class Net(torch.nn.Module):
            def __init__(self):
                super(Net, self).__init__()
                self.conv1 = torch.nn.Conv2d(1, 100, 1)
                self.conv2 = torch.nn.Conv2d(100, 1, 1)
            def forward(self, x):
                x = self.conv1(x)
                x = self.conv2(x)
                return x
        model = Net()
        opt = torch.optim.SGD(model.parameters(), lr=0.1)
        try:
            # Pass only a single named parameter while the optimizer holds
            # all of them — this mismatch must raise ValueError.
            hvd.DistributedOptimizer(opt,
                                     named_parameters=list(model.named_parameters())[0:1])
            assert False, 'hvd.DistributedOptimizer did not throw error'
        except ValueError:
            pass
    def test_horovod_join_allreduce(self):
        """Test Join op with allreduce."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()
        dtypes = [torch.IntTensor, torch.LongTensor,
                  torch.FloatTensor, torch.DoubleTensor]
        if torch.cuda.is_available():
            dtypes += [torch.cuda.IntTensor, torch.cuda.LongTensor,
                       torch.cuda.FloatTensor, torch.cuda.DoubleTensor,
                       torch.cuda.HalfTensor]
        integral_types = [torch.IntTensor, torch.LongTensor, torch.cuda.IntTensor, torch.cuda.LongTensor]
        dims = [1, 2, 3]
        first_join_ranks = [0, 1]
        cachings = [False, True]
        for dtype, dim, first_join_rank, caching in itertools.product(dtypes, dims, first_join_ranks, cachings):
            torch.manual_seed(1234)
            def div(t, s):
                # _1_5_api: integer division semantics changed in torch 1.5.
                if _1_5_api and dtype in integral_types:
                    return t.floor_divide(s)
                return t / s
            # Use two tensors to test fusion
            tensor_a = torch.FloatTensor(*([5] * dim)).random_(-100, 100)
            tensor_a = self.cast_and_place(tensor_a, dtype)
            tensor_b = torch.FloatTensor(*([17] * dim)).random_(-100, 100)
            tensor_b = self.cast_and_place(tensor_b, dtype)
            if caching:
                # Prime the response cache with one allreduce on every rank.
                handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
                handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
                averaged_a = hvd.synchronize(handle_a)
                averaged_b = hvd.synchronize(handle_b)
            if rank == first_join_rank:
                # This rank joins immediately without contributing tensors.
                if dtype.is_cuda:
                    ret = hvd.join(hvd.local_rank())
                else:
                    ret = hvd.join()
            else:
                handle_a = hvd.allreduce_async(tensor_a, name="tensor_a", average=True)
                handle_b = hvd.allreduce_async(tensor_b, name="tensor_b", average=True)
                averaged_a = hvd.synchronize(handle_a)
                averaged_b = hvd.synchronize(handle_b)
                if dtype.is_cuda:
                    ret = hvd.join(hvd.local_rank())
                else:
                    ret = hvd.join()
                # Threshold for floating point equality depends on number of
                # ranks, since we're comparing against precise multiplication.
                if size <= 3 or dtype in integral_types:
                    threshold = 0
                elif size < 10:
                    threshold = 1e-4
                elif size < 15:
                    threshold = 5e-4
                else:
                    break
                # (size - 1) ranks contributed; the joined rank is excluded
                # from the average.
                assert torch.allclose(averaged_a, div(tensor_a * (size - 1), size), threshold), \
                    'hvd.join with hvd.allreduce produces incorrect results'
                assert torch.allclose(averaged_b, div(tensor_b * (size - 1), size), threshold), \
                    'hvd.join with hvd.allreduce produces incorrect results'
    def test_horovod_join_allgather(self):
        """Test Join op with allgather."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        dims = [17] * 3
        tensor = torch.FloatTensor(*dims)
        if rank == 0:
            # Rank 0 joins without participating in the allgather.
            if torch.cuda.is_available():
                ret = hvd.join(hvd.local_rank())
            else:
                ret = hvd.join()
        else:
            try:
                # With rank 0 joined, the allgather is expected to fail.
                hvd.allgather(tensor)
                assert False, 'hvd.allgather did not throw error'
            except (torch.FatalError, RuntimeError):
                pass
            ret = hvd.join(hvd.local_rank())
    def test_horovod_join_broadcast(self):
        """Test Join op with broadcast."""
        hvd.init()
        rank = hvd.rank()
        size = hvd.size()
        # This test does not apply if there is only one worker.
        if size == 1:
            self.skipTest("Only one worker available")
        dims = [17] * 3
        tensor = torch.FloatTensor(*dims)
        if rank == 0:
            # Rank 0 joins without participating in the broadcast.
            ret = hvd.join(hvd.local_rank())
        else:
            try:
                # Broadcasting from rank 1 while rank 0 has joined must fail.
                broadcasted_tensor = hvd.broadcast(tensor, 1, name="test_horovod_join_broadcast")
                assert False, 'hvd.broadcast did not throw error'
            except (torch.FatalError, RuntimeError):
                pass
            if torch.cuda.is_available():
                ret = hvd.join(hvd.local_rank())
            else:
                ret = hvd.join()
    def test_horovod_sync_batch_norm(self):
        """Tests Horovod version of SyncBatchNorm."""
        if not torch.cuda.is_available():
            self.skipTest("No GPUs available")
        hvd.init()
        # Two fixtures: per-rank rows with 2 features and with 1 feature.
        # Each rank processes ts[rank]; plain BatchNorm1d sees all rows at once
        # to serve as the reference.
        ts_list = [
            torch.stack([
                torch.tensor([
                    [r, r + 1],
                    [r * 2, r * 2 + 1],
                    [r * 3, r * 3 + 1],
                    [r * 4, r * 4 + 1]
                ])
                for r in range(hvd.size())
            ]),
            torch.stack([
                torch.tensor([
                    [r + 1],
                    [r * 2 + 1],
                    [r * 3 + 1],
                    [r * 4 + 1]
                ])
                for r in range(hvd.size())
            ]),
        ]
        for ts in ts_list:
            sync_bn = hvd.SyncBatchNorm(num_features=4)
            sync_bn.cuda(hvd.local_rank())
            bn = torch.nn.BatchNorm1d(num_features=4)
            bn.cuda(hvd.local_rank())
            ts = ts.cuda(hvd.local_rank()).float()
            ts1 = ts.clone().requires_grad_()
            ts2 = ts.clone().requires_grad_()
            # Training
            sync_bn_out = sync_bn(ts1[hvd.rank()].unsqueeze(0))
            bn_out = bn(ts2)
            assert torch.allclose(sync_bn_out, bn_out[hvd.rank()].unsqueeze(0), 1e-6)
            assert torch.allclose(sync_bn.running_mean, bn.running_mean, 1e-6)
            assert torch.allclose(sync_bn.running_var, bn.running_var, 1e-6)
            # Gradients
            sync_bn_out.sum().backward()
            bn_out.mean(dim=0).sum().backward()
            # Sync-BN gradients must match the reference after averaging
            # across ranks.
            assert torch.allclose(hvd.allreduce(sync_bn.weight.grad, name='sync_bn.weight.grad'), bn.weight.grad, 1e-6)
            assert torch.allclose(hvd.allreduce(sync_bn.bias.grad, name='sync_bn.bias.grad'), bn.bias.grad, 1e-6)
            assert torch.allclose(hvd.allreduce(ts1.grad, name='ts1.grad'), ts2.grad, 1e-6)
    @pytest.mark.skipif(platform.system() == 'Darwin', reason='https://github.com/horovod/horovod/issues/2496')
    def test_timeline_api(self):
        """Test start_timeline/stop_timeline, including restarting with new
        filenames and toggling mark_cycles."""
        hvd.init()
        def check_file(fname, check_cycle=True):
            # Only rank 0 writes the timeline file.
            if hvd.rank() == 0:
                with open(fname, 'r') as timeline_file:
                    timeline_text = timeline_file.read()
                    assert 'allreduce.test_allreduce' in timeline_text, timeline_text
                    assert 'start_time_since_epoch_in_micros' in timeline_text, timeline_text
                    assert 'NEGOTIATE_ALLREDUCE' in timeline_text, timeline_text
                    assert 'ALLREDUCE' in timeline_text, timeline_text
                    json_obj = json.loads(timeline_text)
                    assert json_obj is not None
                    if check_cycle:
                        assert 'CYCLE_START' in timeline_text, timeline_text
        with temppath() as fname1:
            hvd.start_timeline(fname1, mark_cycles=True)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
            # stop timeline will immediately stop events to be registered in timeline. We are providing some time
            # before calling stop so that mark_cycle events can be registered in timeline file.
            time.sleep(0.2)
            hvd.stop_timeline()
            check_file(fname1)
        # Test resuming with a different filename.
        with temppath() as fname2:
            hvd.start_timeline(fname2, mark_cycles=True)
            time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
            # stop timeline will immediately stop events to be registered in timeline. We are providing some time
            # before calling stop so that cycle events can be registered in timeline file.
            time.sleep(0.2)
            hvd.stop_timeline()
            check_file(fname2)
        # Test resuming with a different filename, but mark_cycles=False
        with temppath() as fname3:
            # Make sure that last stop timeline has been processed.
            hvd.start_timeline(fname3, mark_cycles=False)
            time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
            # stop timeline will immediately stop events to be registered in timeline. We are providing some time
            # before calling stop so that events can be registered in timeline file.
            hvd.stop_timeline()
            check_file(fname3, check_cycle=False)
        # Test resuming with a different filename, but mark_cycles=True
        with temppath() as fname4:
            # Make sure that last stop timeline has been processed.
            hvd.start_timeline(fname4, mark_cycles=True)
            time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy();
            # stop timeline will immediately stop events to be registered in timeline. We are providing some time
            # before calling stop so that cycle events can be registered in timeline file.
            time.sleep(0.2)
            hvd.stop_timeline()
            check_file(fname4, check_cycle=True)
        with temppath() as fname5:
            # Make sure that last stop timeline has been processed.
            hvd.start_timeline(fname5, mark_cycles=False)
            hvd.start_timeline(fname5, mark_cycles=False)
            time.sleep(0.2)
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
            hvd.allreduce(torch.tensor([1, 2, 3], dtype=torch.float32), name='test_allreduce').numpy()
            time.sleep(0.2)
            hvd.stop_timeline()
            check_file(fname5, check_cycle=False)
        hvd.shutdown()
    def test_optimizer_no_named_parameters(self):
        """Test that auto-generated parameter names are unique and identical
        across all workers when named_parameters is omitted."""
        hvd.init()
        model = nn.Sequential(nn.Linear(10, 10), nn.Linear(10, 10))
        optimizer = torch.optim.SGD(
            [{"params": model[0].parameters()}, {"params": model[1].parameters()}, ],
            lr=0.001,
        )
        optimizer = hvd.DistributedOptimizer(optimizer)
        params = optimizer._parameter_names
        # Generated names must be unique within this worker.
        self.assertEqual(len(params), len(set(params.values())))
        # Make sure all workers have the same set of parameter names
        all_param_names = hvd.allgather_object(set(params.values()))
        self.assertEqual(len(all_param_names), hvd.size())
        for param_names in all_param_names:
            self.assertEqual(all_param_names[0], param_names)
    def test_sparse_embeddings(self):
        """Test that Horovod will correctly aggregate sparse gradients."""
        hvd.init()
        # Exercise both pure-sparse aggregation and sparse-as-dense conversion.
        for sparse_as_dense in [False, True]:
            class Net(torch.nn.Module):
                def __init__(self):
                    super(Net, self).__init__()
                    self.embedding = nn.Embedding(10, 3, sparse=True)
                def forward(self, x):
                    x = self.embedding(x)
                    return x
            model = Net()
            # Different ranks look up different (and differently-shaped) index sets.
            if hvd.rank() == 0:
                inp = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])
            else:
                inp = torch.LongTensor([[1, 3, 4], [4, 7, 9]])
            # list() see: https://github.com/pytorch/pytorch/issues/47594
            opt = torch.optim.SparseAdam(list(model.parameters()), lr=0.1)
            opt = hvd.DistributedOptimizer(opt, sparse_as_dense=sparse_as_dense)
            loss = model(inp).sum()
            opt.zero_grad()
            loss.backward()
            opt.step()
    def test_async_sparse_allreduce(self):
        """Test that allgather over indices and values is equivalent to allreduce."""
        hvd.init()
        # Generate random tensors, then convert them to sparse
        def random_sparse_tensor(*shape):
            t = torch.rand(*shape)
            # Zero out ~80% of entries so the sparse representation is meaningful.
            t[t < 0.8] = 0
            return t.to_sparse()
        tensor_sizes = [17, 32, 81, 12, 15, 23, 22] * 5
        tensors = [random_sparse_tensor(d0, 10) for d0 in tensor_sizes]
        # Dense allreduce of the same data serves as the reference result.
        allreduced_tensors = [hvd.allreduce(t.to_dense()) for t in tensors]
        handles = [hvd.sparse_allreduce_async(t, op=hvd.Average, name=str(i))
                   for i, t in enumerate(tensors)]
        allgathered_tensors = [handle() for handle in handles]
        for reduced, gathered in zip(allreduced_tensors, allgathered_tensors):
            assert torch.allclose(reduced, gathered.to_dense(), 1e-6)
if __name__ == "__main__":
unittest.main()
| [
"horovod.torch.synchronize",
"torch.optim.Optimizer.__subclasses__",
"torch.nn.BatchNorm1d",
"horovod.torch.size",
"torch.cuda.is_available",
"torch.IntTensor",
"horovod.torch.stop_timeline",
"itertools.product",
"horovod.torch.allgather_object",
"horovod.torch.start_timeline",
"platform.system"... | [((1322, 1353), 'distutils.version.LooseVersion', 'LooseVersion', (['torch.__version__'], {}), '(torch.__version__)\n', (1334, 1353), False, 'from distutils.version import LooseVersion\n'), ((1357, 1378), 'distutils.version.LooseVersion', 'LooseVersion', (['"""1.5.0"""'], {}), "('1.5.0')\n", (1369, 1378), False, 'from distutils.version import LooseVersion\n'), ((102147, 102162), 'unittest.main', 'unittest.main', ([], {}), '()\n', (102160, 102162), False, 'import unittest\n'), ((1187, 1212), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (1202, 1212), False, 'import os\n'), ((1890, 1921), 'warnings.simplefilter', 'warnings.simplefilter', (['"""module"""'], {}), "('module')\n", (1911, 1921), False, 'import warnings\n'), ((3069, 3092), 'common.mpi_env_rank_and_size', 'mpi_env_rank_and_size', ([], {}), '()\n', (3090, 3092), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((3409, 3419), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (3417, 3419), True, 'import horovod.torch as hvd\n'), ((3472, 3486), 'horovod.torch.shutdown', 'hvd.shutdown', ([], {}), '()\n', (3484, 3486), True, 'import horovod.torch as hvd\n'), ((3495, 3505), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (3503, 3505), True, 'import horovod.torch as hvd\n'), ((3747, 3757), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (3755, 3757), True, 'import horovod.torch as hvd\n'), ((3773, 3793), 'horovod.torch.is_initialized', 'hvd.is_initialized', ([], {}), '()\n', (3791, 3793), True, 'import horovod.torch as hvd\n'), ((3998, 4012), 'horovod.torch.shutdown', 'hvd.shutdown', ([], {}), '()\n', (4010, 4012), True, 'import horovod.torch as hvd\n'), ((4061, 4071), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (4069, 4071), True, 'import horovod.torch as hvd\n'), ((4196, 4219), 'common.mpi_env_rank_and_size', 'mpi_env_rank_and_size', ([], {}), '()\n', (4217, 4219), False, 'from common import 
mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((4434, 4444), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (4442, 4444), True, 'import horovod.torch as hvd\n'), ((4460, 4470), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (4468, 4470), True, 'import horovod.torch as hvd\n'), ((4702, 4725), 'common.mpi_env_rank_and_size', 'mpi_env_rank_and_size', ([], {}), '()\n', (4723, 4725), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((4940, 4950), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (4948, 4950), True, 'import horovod.torch as hvd\n'), ((4966, 4976), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (4974, 4976), True, 'import horovod.torch as hvd\n'), ((5203, 5213), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (5211, 5213), True, 'import horovod.torch as hvd\n'), ((5229, 5239), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (5237, 5239), True, 'import horovod.torch as hvd\n'), ((5411, 5436), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (5434, 5436), False, 'import torch\n'), ((5675, 5706), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (5692, 5706), False, 'import itertools\n'), ((6785, 6795), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (6793, 6795), True, 'import horovod.torch as hvd\n'), ((6811, 6821), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (6819, 6821), True, 'import horovod.torch as hvd\n'), ((6975, 7000), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (6998, 7000), False, 'import torch\n'), ((7239, 7270), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (7256, 7270), False, 'import itertools\n'), ((8230, 8240), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (8238, 8240), True, 'import horovod.torch as hvd\n'), ((8256, 8266), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (8264, 8266), True, 'import horovod.torch as 
hvd\n'), ((8438, 8463), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (8461, 8463), False, 'import torch\n'), ((8702, 8733), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (8719, 8733), False, 'import itertools\n'), ((9867, 9877), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (9875, 9877), True, 'import horovod.torch as hvd\n'), ((9893, 9903), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (9901, 9903), True, 'import horovod.torch as hvd\n'), ((10072, 10097), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (10095, 10097), False, 'import torch\n'), ((10394, 10425), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (10411, 10425), False, 'import itertools\n'), ((12063, 12073), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (12071, 12073), True, 'import horovod.torch as hvd\n'), ((12095, 12111), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (12109, 12111), True, 'import horovod.torch as hvd\n'), ((12127, 12137), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (12135, 12137), True, 'import horovod.torch as hvd\n'), ((12550, 12581), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (12567, 12581), False, 'import itertools\n'), ((13598, 13608), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (13606, 13608), True, 'import horovod.torch as hvd\n'), ((13624, 13634), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (13632, 13634), True, 'import horovod.torch as hvd\n'), ((13802, 13827), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (13825, 13827), False, 'import torch\n'), ((14252, 14283), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (14269, 14283), False, 'import itertools\n'), ((16363, 16373), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (16371, 16373), True, 'import horovod.torch as 
hvd\n'), ((16389, 16399), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (16397, 16399), True, 'import horovod.torch as hvd\n'), ((16567, 16592), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (16590, 16592), False, 'import torch\n'), ((17017, 17048), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (17034, 17048), False, 'import itertools\n'), ((19180, 19190), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (19188, 19190), True, 'import horovod.torch as hvd\n'), ((19206, 19216), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (19214, 19216), True, 'import horovod.torch as hvd\n'), ((19232, 19242), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (19240, 19242), True, 'import horovod.torch as hvd\n'), ((19435, 19458), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (19452, 19458), False, 'import torch\n'), ((19785, 19808), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (19802, 19808), False, 'import torch\n'), ((20324, 20334), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (20332, 20334), True, 'import horovod.torch as hvd\n'), ((20350, 20360), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (20358, 20360), True, 'import horovod.torch as hvd\n'), ((20376, 20386), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (20384, 20386), True, 'import horovod.torch as hvd\n'), ((21439, 21449), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (21447, 21449), True, 'import horovod.torch as hvd\n'), ((21465, 21475), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (21473, 21475), True, 'import horovod.torch as hvd\n'), ((21491, 21501), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (21499, 21501), True, 'import horovod.torch as hvd\n'), ((22212, 22222), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (22220, 22222), True, 'import horovod.torch as hvd\n'), ((22238, 22248), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', 
(22246, 22248), True, 'import horovod.torch as hvd\n'), ((22433, 22457), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (22450, 22457), False, 'import torch\n'), ((22467, 22517), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor'], {'name': '"""duplicate_name"""'}), "(tensor, name='duplicate_name')\n", (22486, 22517), True, 'import horovod.torch as hvd\n'), ((22876, 22886), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (22884, 22886), True, 'import horovod.torch as hvd\n'), ((22902, 22912), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (22910, 22912), True, 'import horovod.torch as hvd\n'), ((23050, 23075), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (23073, 23075), False, 'import torch\n'), ((23223, 23254), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (23240, 23254), False, 'import itertools\n'), ((24074, 24084), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (24082, 24084), True, 'import horovod.torch as hvd\n'), ((24222, 24247), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (24245, 24247), False, 'import torch\n'), ((24395, 24426), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (24412, 24426), False, 'import itertools\n'), ((25243, 25253), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (25251, 25253), True, 'import horovod.torch as hvd\n'), ((25269, 25279), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (25277, 25279), True, 'import horovod.torch as hvd\n'), ((25451, 25476), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (25474, 25476), False, 'import torch\n'), ((25715, 25746), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (25732, 25746), False, 'import itertools\n'), ((27011, 27021), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (27019, 27021), True, 'import horovod.torch as hvd\n'), 
((27037, 27047), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (27045, 27047), True, 'import horovod.torch as hvd\n'), ((27219, 27244), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (27242, 27244), False, 'import torch\n'), ((27483, 27514), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (27500, 27514), False, 'import itertools\n'), ((28728, 28738), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (28736, 28738), True, 'import horovod.torch as hvd\n'), ((28754, 28764), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (28762, 28764), True, 'import horovod.torch as hvd\n'), ((28936, 28961), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (28959, 28961), False, 'import torch\n'), ((29200, 29231), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (29217, 29231), False, 'import itertools\n'), ((30731, 30741), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (30739, 30741), True, 'import horovod.torch as hvd\n'), ((31173, 31183), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (31181, 31183), True, 'import horovod.torch as hvd\n'), ((31199, 31209), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (31207, 31209), True, 'import horovod.torch as hvd\n'), ((31347, 31372), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (31370, 31372), False, 'import torch\n'), ((31520, 31551), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (31537, 31551), False, 'import itertools\n'), ((32574, 32584), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (32582, 32584), True, 'import horovod.torch as hvd\n'), ((32722, 32747), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (32745, 32747), False, 'import torch\n'), ((32895, 32926), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (32912, 32926), False, 'import 
itertools\n'), ((33933, 33943), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (33941, 33943), True, 'import horovod.torch as hvd\n'), ((33959, 33969), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (33967, 33969), True, 'import horovod.torch as hvd\n'), ((33985, 33995), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (33993, 33995), True, 'import horovod.torch as hvd\n'), ((34209, 34234), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (34232, 34234), False, 'import torch\n'), ((34566, 34597), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (34583, 34597), False, 'import itertools\n'), ((35572, 35582), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (35580, 35582), True, 'import horovod.torch as hvd\n'), ((35598, 35608), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (35606, 35608), True, 'import horovod.torch as hvd\n'), ((35624, 35634), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (35632, 35634), True, 'import horovod.torch as hvd\n'), ((35848, 35873), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (35871, 35873), False, 'import torch\n'), ((36205, 36236), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (36222, 36236), False, 'import itertools\n'), ((37402, 37412), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (37410, 37412), True, 'import horovod.torch as hvd\n'), ((37428, 37438), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (37436, 37438), True, 'import horovod.torch as hvd\n'), ((37454, 37464), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (37462, 37464), True, 'import horovod.torch as hvd\n'), ((37678, 37703), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (37701, 37703), False, 'import torch\n'), ((38093, 38124), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (38110, 38124), False, 'import itertools\n'), ((39400, 
39410), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (39408, 39410), True, 'import horovod.torch as hvd\n'), ((39426, 39436), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (39434, 39436), True, 'import horovod.torch as hvd\n'), ((39452, 39462), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (39460, 39462), True, 'import horovod.torch as hvd\n'), ((40099, 40109), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (40107, 40109), True, 'import horovod.torch as hvd\n'), ((40125, 40135), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (40133, 40135), True, 'import horovod.torch as hvd\n'), ((40151, 40161), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (40159, 40161), True, 'import horovod.torch as hvd\n'), ((40845, 40855), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (40853, 40855), True, 'import horovod.torch as hvd\n'), ((40871, 40881), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (40879, 40881), True, 'import horovod.torch as hvd\n'), ((41066, 41090), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (41083, 41090), False, 'import torch\n'), ((41100, 41150), 'horovod.torch.allgather_async', 'hvd.allgather_async', (['tensor'], {'name': '"""duplicate_name"""'}), "(tensor, name='duplicate_name')\n", (41119, 41150), True, 'import horovod.torch as hvd\n'), ((41509, 41519), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (41517, 41519), True, 'import horovod.torch as hvd\n'), ((41535, 41545), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (41543, 41545), True, 'import horovod.torch as hvd\n'), ((41561, 41571), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (41569, 41571), True, 'import horovod.torch as hvd\n'), ((41710, 41735), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (41733, 41735), False, 'import torch\n'), ((41883, 41914), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (41900, 41914), False, 'import itertools\n'), 
((43200, 43210), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (43208, 43210), True, 'import horovod.torch as hvd\n'), ((43226, 43236), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (43234, 43236), True, 'import horovod.torch as hvd\n'), ((43252, 43262), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (43260, 43262), True, 'import horovod.torch as hvd\n'), ((43618, 43643), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (43641, 43643), False, 'import torch\n'), ((44025, 44068), 'itertools.product', 'itertools.product', (['dtypes', 'dims', 'root_ranks'], {}), '(dtypes, dims, root_ranks)\n', (44042, 44068), False, 'import itertools\n'), ((44991, 45001), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (44999, 45001), True, 'import horovod.torch as hvd\n'), ((45017, 45027), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (45025, 45027), True, 'import horovod.torch as hvd\n'), ((45043, 45053), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (45051, 45053), True, 'import horovod.torch as hvd\n'), ((45409, 45434), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (45432, 45434), False, 'import torch\n'), ((45816, 45859), 'itertools.product', 'itertools.product', (['dtypes', 'dims', 'root_ranks'], {}), '(dtypes, dims, root_ranks)\n', (45833, 45859), False, 'import itertools\n'), ((46817, 46827), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (46825, 46827), True, 'import horovod.torch as hvd\n'), ((46843, 46853), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (46851, 46853), True, 'import horovod.torch as hvd\n'), ((46869, 46879), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (46877, 46879), True, 'import horovod.torch as hvd\n'), ((47522, 47532), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (47530, 47532), True, 'import horovod.torch as hvd\n'), ((47548, 47558), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (47556, 47558), True, 'import horovod.torch as hvd\n'), ((47574, 
47584), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (47582, 47584), True, 'import horovod.torch as hvd\n'), ((48251, 48261), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (48259, 48261), True, 'import horovod.torch as hvd\n'), ((48277, 48287), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (48285, 48287), True, 'import horovod.torch as hvd\n'), ((48303, 48313), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (48311, 48313), True, 'import horovod.torch as hvd\n'), ((48885, 48895), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (48893, 48895), True, 'import horovod.torch as hvd\n'), ((48911, 48921), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (48919, 48921), True, 'import horovod.torch as hvd\n'), ((49106, 49130), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (49123, 49130), False, 'import torch\n'), ((49140, 49203), 'horovod.torch.broadcast_async', 'hvd.broadcast_async', (['tensor'], {'root_rank': '(0)', 'name': '"""duplicate_name"""'}), "(tensor, root_rank=0, name='duplicate_name')\n", (49159, 49203), True, 'import horovod.torch as hvd\n'), ((49575, 49585), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (49583, 49585), True, 'import horovod.torch as hvd\n'), ((49601, 49611), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (49609, 49611), True, 'import horovod.torch as hvd\n'), ((49627, 49637), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (49635, 49637), True, 'import horovod.torch as hvd\n'), ((49918, 49943), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (49941, 49943), False, 'import torch\n'), ((50141, 50184), 'itertools.product', 'itertools.product', (['dtypes', 'dims', 'root_ranks'], {}), '(dtypes, dims, root_ranks)\n', (50158, 50184), False, 'import itertools\n'), ((51030, 51040), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (51038, 51040), True, 'import horovod.torch as hvd\n'), ((51056, 51066), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', 
(51064, 51066), True, 'import horovod.torch as hvd\n'), ((51082, 51092), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (51090, 51092), True, 'import horovod.torch as hvd\n'), ((51589, 51614), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (51612, 51614), False, 'import torch\n'), ((51946, 51977), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (51963, 51977), False, 'import itertools\n'), ((53196, 53206), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (53204, 53206), True, 'import horovod.torch as hvd\n'), ((53222, 53232), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (53230, 53232), True, 'import horovod.torch as hvd\n'), ((53248, 53258), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (53256, 53258), True, 'import horovod.torch as hvd\n'), ((53755, 53780), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (53778, 53780), False, 'import torch\n'), ((54112, 54143), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (54129, 54143), False, 'import itertools\n'), ((55088, 55098), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (55096, 55098), True, 'import horovod.torch as hvd\n'), ((55114, 55124), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (55122, 55124), True, 'import horovod.torch as hvd\n'), ((55140, 55150), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (55148, 55150), True, 'import horovod.torch as hvd\n'), ((55980, 56011), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (55997, 56011), False, 'import itertools\n'), ((57371, 57381), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (57379, 57381), True, 'import horovod.torch as hvd\n'), ((57397, 57407), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (57405, 57407), True, 'import horovod.torch as hvd\n'), ((57423, 57433), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (57431, 57433), True, 'import 
horovod.torch as hvd\n'), ((58322, 58332), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (58330, 58332), True, 'import horovod.torch as hvd\n'), ((58348, 58358), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (58356, 58358), True, 'import horovod.torch as hvd\n'), ((58374, 58384), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (58382, 58384), True, 'import horovod.torch as hvd\n'), ((58743, 58764), 'torch.empty', 'torch.empty', (['(size + 1)'], {}), '(size + 1)\n', (58754, 58764), False, 'import torch\n'), ((59139, 59149), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (59147, 59149), True, 'import horovod.torch as hvd\n'), ((59165, 59175), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (59173, 59175), True, 'import horovod.torch as hvd\n'), ((59191, 59201), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (59199, 59201), True, 'import horovod.torch as hvd\n'), ((59418, 59439), 'torch.empty', 'torch.empty', (['(size - 1)'], {}), '(size - 1)\n', (59429, 59439), False, 'import torch\n'), ((59457, 59492), 'torch.ones', 'torch.ones', (['size'], {'dtype': 'torch.int32'}), '(size, dtype=torch.int32)\n', (59467, 59492), False, 'import torch\n'), ((59856, 59866), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (59864, 59866), True, 'import horovod.torch as hvd\n'), ((59882, 59892), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (59890, 59892), True, 'import horovod.torch as hvd\n'), ((59908, 59918), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (59916, 59918), True, 'import horovod.torch as hvd\n'), ((60135, 60152), 'torch.empty', 'torch.empty', (['size'], {}), '(size)\n', (60146, 60152), False, 'import torch\n'), ((60170, 60208), 'torch.empty', 'torch.empty', (['size'], {'dtype': 'torch.float32'}), '(size, dtype=torch.float32)\n', (60181, 60208), False, 'import torch\n'), ((60590, 60600), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (60598, 60600), True, 'import horovod.torch as hvd\n'), ((60616, 60626), 
'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (60624, 60626), True, 'import horovod.torch as hvd\n'), ((60642, 60652), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (60650, 60652), True, 'import horovod.torch as hvd\n'), ((61089, 61112), 'torch.ones', 'torch.ones', (['tensor_size'], {}), '(tensor_size)\n', (61099, 61112), False, 'import torch\n'), ((61400, 61410), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (61408, 61410), True, 'import horovod.torch as hvd\n'), ((61426, 61436), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (61434, 61436), True, 'import horovod.torch as hvd\n'), ((61452, 61462), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (61460, 61462), True, 'import horovod.torch as hvd\n'), ((61799, 61824), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (61822, 61824), False, 'import torch\n'), ((61973, 62004), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (61990, 62004), False, 'import itertools\n'), ((63093, 63103), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (63101, 63103), True, 'import horovod.torch as hvd\n'), ((63119, 63129), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (63127, 63129), True, 'import horovod.torch as hvd\n'), ((63145, 63155), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (63153, 63155), True, 'import horovod.torch as hvd\n'), ((63492, 63517), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (63515, 63517), False, 'import torch\n'), ((63666, 63697), 'itertools.product', 'itertools.product', (['dtypes', 'dims'], {}), '(dtypes, dims)\n', (63683, 63697), False, 'import itertools\n'), ((64587, 64597), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (64595, 64597), True, 'import horovod.torch as hvd\n'), ((66830, 66876), 'itertools.product', 'itertools.product', (['optimizers', 'opt_params_list'], {}), '(optimizers, opt_params_list)\n', (66847, 66876), False, 'import itertools\n'), ((70672, 70682), 
'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (70680, 70682), True, 'import horovod.torch as hvd\n'), ((73803, 73813), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (73811, 73813), True, 'import horovod.torch as hvd\n'), ((73827, 73847), 'torch.Tensor', 'torch.Tensor', (['[1, 3]'], {}), '([1, 3])\n', (73839, 73847), False, 'import torch\n'), ((73860, 73873), 'torch.rand', 'torch.rand', (['(4)'], {}), '(4)\n', (73870, 73873), False, 'import torch\n'), ((74198, 74251), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (74227, 74251), True, 'import horovod.torch as hvd\n'), ((74328, 74360), 'horovod.torch.broadcast', 'hvd.broadcast', (['grad'], {'root_rank': '(0)'}), '(grad, root_rank=0)\n', (74341, 74360), True, 'import horovod.torch as hvd\n'), ((74530, 74540), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (74538, 74540), True, 'import horovod.torch as hvd\n'), ((74694, 74732), 'horovod.torch.broadcast_object', 'hvd.broadcast_object', (['obj'], {'root_rank': '(0)'}), '(obj, root_rank=0)\n', (74714, 74732), True, 'import horovod.torch as hvd\n'), ((74827, 74837), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (74835, 74837), True, 'import horovod.torch as hvd\n'), ((74962, 74985), 'horovod.torch.allgather_object', 'hvd.allgather_object', (['d'], {}), '(d)\n', (74982, 74985), True, 'import horovod.torch as hvd\n'), ((76825, 76835), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (76833, 76835), True, 'import horovod.torch as hvd\n'), ((76851, 76861), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (76859, 76861), True, 'import horovod.torch as hvd\n'), ((76877, 76887), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (76885, 76887), True, 'import horovod.torch as hvd\n'), ((78893, 78939), 'itertools.product', 'itertools.product', (['optimizers', 'opt_params_list'], {}), '(optimizers, opt_params_list)\n', (78910, 78939), False, 'import 
itertools\n'), ((79574, 79584), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (79582, 79584), True, 'import horovod.torch as hvd\n'), ((79606, 79622), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (79620, 79622), True, 'import horovod.torch as hvd\n'), ((79638, 79648), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (79646, 79648), True, 'import horovod.torch as hvd\n'), ((80614, 80644), 'torch.rand', 'torch.rand', (['[1, 1, 1000, 1000]'], {}), '([1, 1, 1000, 1000])\n', (80624, 80644), False, 'import torch\n'), ((80977, 80987), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (80985, 80987), True, 'import horovod.torch as hvd\n'), ((81229, 81245), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (81243, 81245), True, 'import horovod.torch as hvd\n'), ((81261, 81271), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (81269, 81271), True, 'import horovod.torch as hvd\n'), ((81916, 81946), 'torch.rand', 'torch.rand', (['[1, 1, 1000, 1000]'], {}), '([1, 1, 1000, 1000])\n', (81926, 81946), False, 'import torch\n'), ((82332, 82356), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (82347, 82356), False, 'import torch\n'), ((82372, 82396), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(1)', '(1)'], {}), '(1, 1, 1)\n', (82387, 82396), False, 'import torch\n'), ((82487, 82522), 'torch.optim.SGD', 'torch.optim.SGD', (['parameters'], {'lr': '(0.1)'}), '(parameters, lr=0.1)\n', (82502, 82522), False, 'import torch\n'), ((83051, 83061), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (83059, 83061), True, 'import horovod.torch as hvd\n'), ((83077, 83087), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (83085, 83087), True, 'import horovod.torch as hvd\n'), ((83245, 83270), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(10)', '(1)'], {}), '(1, 10, 1)\n', (83260, 83270), False, 'import torch\n'), ((83286, 83311), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(10)', '(1)', '(1)'], {}), '(10, 
1, 1)\n', (83301, 83311), False, 'import torch\n'), ((83326, 83354), 'torch.rand', 'torch.rand', (['[1, 1, 100, 100]'], {}), '([1, 1, 100, 100])\n', (83336, 83354), False, 'import torch\n'), ((84958, 84968), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (84966, 84968), True, 'import horovod.torch as hvd\n'), ((84984, 84994), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (84992, 84994), True, 'import horovod.torch as hvd\n'), ((85247, 85268), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (85262, 85268), False, 'import torch\n'), ((85676, 85697), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred', 'y'], {}), '(y_pred, y)\n', (85686, 85697), True, 'import torch.nn.functional as F\n'), ((86273, 86283), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (86281, 86283), True, 'import horovod.torch as hvd\n'), ((86299, 86309), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (86307, 86309), True, 'import horovod.torch as hvd\n'), ((86563, 86584), 'torch.nn.Linear', 'torch.nn.Linear', (['(1)', '(1)'], {}), '(1, 1)\n', (86578, 86584), False, 'import torch\n'), ((86869, 86890), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred', 'y'], {}), '(y_pred, y)\n', (86879, 86890), True, 'import torch.nn.functional as F\n'), ((87421, 87431), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (87429, 87431), True, 'import horovod.torch as hvd\n'), ((87822, 87852), 'torch.rand', 'torch.rand', (['[1, 1, 1000, 1000]'], {}), '([1, 1, 1000, 1000])\n', (87832, 87852), False, 'import torch\n'), ((87926, 87955), 'horovod.torch.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {}), '(opt)\n', (87950, 87955), True, 'import horovod.torch as hvd\n'), ((88191, 88201), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (88199, 88201), True, 'import horovod.torch as hvd\n'), ((88973, 88983), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (88981, 88983), True, 'import horovod.torch as hvd\n'), ((88999, 89009), 
'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (89007, 89009), True, 'import horovod.torch as hvd\n'), ((89025, 89035), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (89033, 89035), True, 'import horovod.torch as hvd\n'), ((89158, 89183), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (89181, 89183), False, 'import torch\n'), ((89623, 89682), 'itertools.product', 'itertools.product', (['dtypes', 'dims', 'first_join_ranks', 'cachings'], {}), '(dtypes, dims, first_join_ranks, cachings)\n', (89640, 89682), False, 'import itertools\n'), ((92021, 92031), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (92029, 92031), True, 'import horovod.torch as hvd\n'), ((92047, 92057), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (92055, 92057), True, 'import horovod.torch as hvd\n'), ((92073, 92083), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (92081, 92083), True, 'import horovod.torch as hvd\n'), ((92268, 92292), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (92285, 92292), False, 'import torch\n'), ((92808, 92818), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (92816, 92818), True, 'import horovod.torch as hvd\n'), ((92834, 92844), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (92842, 92844), True, 'import horovod.torch as hvd\n'), ((92860, 92870), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (92868, 92870), True, 'import horovod.torch as hvd\n'), ((93055, 93079), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (93072, 93079), False, 'import torch\n'), ((93761, 93771), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (93769, 93771), True, 'import horovod.torch as hvd\n'), ((95669, 95679), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (95677, 95679), True, 'import horovod.torch as hvd\n'), ((99396, 99410), 'horovod.torch.shutdown', 'hvd.shutdown', ([], {}), '()\n', (99408, 99410), True, 'import horovod.torch as hvd\n'), ((99470, 99480), 
'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (99478, 99480), True, 'import horovod.torch as hvd\n'), ((99725, 99760), 'horovod.torch.DistributedOptimizer', 'hvd.DistributedOptimizer', (['optimizer'], {}), '(optimizer)\n', (99749, 99760), True, 'import horovod.torch as hvd\n'), ((100297, 100307), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (100305, 100307), True, 'import horovod.torch as hvd\n'), ((101354, 101364), 'horovod.torch.init', 'hvd.init', ([], {}), '()\n', (101362, 101364), True, 'import horovod.torch as hvd\n'), ((2753, 2778), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2776, 2778), False, 'import torch\n'), ((2792, 2840), 'common.skip_or_fail_gpu_test', 'skip_or_fail_gpu_test', (['self', '"""No GPUs available"""'], {}), "(self, 'No GPUs available')\n", (2813, 2840), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((3117, 3146), 'os.getenv', 'os.getenv', (['"""HOROVOD_RANK"""', '(-1)'], {}), "('HOROVOD_RANK', -1)\n", (3126, 3146), False, 'import os\n'), ((3441, 3451), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (3449, 3451), True, 'import horovod.torch as hvd\n'), ((3453, 3463), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (3461, 3463), True, 'import horovod.torch as hvd\n'), ((3529, 3539), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (3537, 3539), True, 'import horovod.torch as hvd\n'), ((3541, 3551), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (3549, 3551), True, 'import horovod.torch as hvd\n'), ((2866, 2883), 'platform.system', 'platform.system', ([], {}), '()\n', (2881, 2883), False, 'import platform\n'), ((3819, 3848), 'os.getenv', 'os.getenv', (['"""HOROVOD_RANK"""', '(-1)'], {}), "('HOROVOD_RANK', -1)\n", (3828, 3848), False, 'import os\n'), ((4032, 4052), 'horovod.torch.is_initialized', 'hvd.is_initialized', ([], {}), '()\n', (4050, 4052), True, 'import horovod.torch as hvd\n'), ((4244, 4273), 'os.getenv', 'os.getenv', 
(['"""HOROVOD_RANK"""', '(-1)'], {}), "('HOROVOD_RANK', -1)\n", (4253, 4273), False, 'import os\n'), ((4750, 4779), 'os.getenv', 'os.getenv', (['"""HOROVOD_SIZE"""', '(-1)'], {}), "('HOROVOD_SIZE', -1)\n", (4759, 4779), False, 'import os\n'), ((5720, 5743), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (5737, 5743), False, 'import torch\n'), ((5894, 5930), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {'average': '(False)'}), '(tensor, average=False)\n', (5907, 5930), True, 'import horovod.torch as hvd\n'), ((6563, 6608), 'torch.allclose', 'torch.allclose', (['summed', 'multiplied', 'threshold'], {}), '(summed, multiplied, threshold)\n', (6577, 6608), False, 'import torch\n'), ((7284, 7307), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (7301, 7307), False, 'import torch\n'), ((7460, 7495), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {'average': '(True)'}), '(tensor, average=True)\n', (7473, 7495), True, 'import horovod.torch as hvd\n'), ((8014, 8057), 'torch.allclose', 'torch.allclose', (['averaged', 'tensor', 'threshold'], {}), '(averaged, tensor, threshold)\n', (8028, 8057), False, 'import torch\n'), ((8747, 8770), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (8764, 8770), False, 'import torch\n'), ((8979, 9016), 'horovod.torch.allreduce_', 'hvd.allreduce_', (['tensor'], {'average': '(False)'}), '(tensor, average=False)\n', (8993, 9016), True, 'import horovod.torch as hvd\n'), ((9618, 9663), 'torch.allclose', 'torch.allclose', (['tensor', 'multiplied', 'threshold'], {}), '(tensor, multiplied, threshold)\n', (9632, 9663), False, 'import torch\n'), ((10439, 10462), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (10456, 10462), False, 'import torch\n'), ((10613, 10655), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor'], {'average': '(False)'}), '(tensor, average=False)\n', (10632, 10655), True, 'import horovod.torch as 
hvd\n'), ((11105, 11128), 'horovod.torch.synchronize', 'hvd.synchronize', (['handle'], {}), '(handle)\n', (11120, 11128), True, 'import horovod.torch as hvd\n'), ((11707, 11752), 'torch.allclose', 'torch.allclose', (['summed', 'multiplied', 'threshold'], {}), '(summed, multiplied, threshold)\n', (11721, 11752), False, 'import torch\n'), ((11980, 12005), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (12003, 12005), False, 'import torch\n'), ((12204, 12229), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (12227, 12229), False, 'import torch\n'), ((12617, 12640), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (12634, 12640), False, 'import torch\n'), ((12880, 12917), 'horovod.torch.allreduce_', 'hvd.allreduce_', (['tensor'], {'average': '(False)'}), '(tensor, average=False)\n', (12894, 12917), True, 'import horovod.torch as hvd\n'), ((13363, 13408), 'torch.allclose', 'torch.allclose', (['tensor', 'multiplied', 'threshold'], {}), '(tensor, multiplied, threshold)\n', (13377, 13408), False, 'import torch\n'), ((14297, 14320), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (14314, 14320), False, 'import torch\n'), ((14333, 14353), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (14347, 14353), True, 'import numpy as np\n'), ((14375, 14394), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (14392, 14394), True, 'import numpy as np\n'), ((14545, 14605), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {'average': '(False)', 'prescale_factor': 'factor'}), '(tensor, average=False, prescale_factor=factor)\n', (14558, 14605), True, 'import horovod.torch as hvd\n'), ((14663, 14704), 'torch.tensor', 'torch.tensor', (['factor'], {'dtype': 'torch.float64'}), '(factor, dtype=torch.float64)\n', (14675, 14704), False, 'import torch\n'), ((16126, 16171), 'torch.allclose', 'torch.allclose', (['summed', 'multiplied', 'threshold'], {}), '(summed, 
multiplied, threshold)\n', (16140, 16171), False, 'import torch\n'), ((17062, 17085), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (17079, 17085), False, 'import torch\n'), ((17098, 17118), 'numpy.random.seed', 'np.random.seed', (['(1234)'], {}), '(1234)\n', (17112, 17118), True, 'import numpy as np\n'), ((17140, 17159), 'numpy.random.uniform', 'np.random.uniform', ([], {}), '()\n', (17157, 17159), True, 'import numpy as np\n'), ((17310, 17371), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {'average': '(False)', 'postscale_factor': 'factor'}), '(tensor, average=False, postscale_factor=factor)\n', (17323, 17371), True, 'import horovod.torch as hvd\n'), ((17429, 17470), 'torch.tensor', 'torch.tensor', (['factor'], {'dtype': 'torch.float64'}), '(factor, dtype=torch.float64)\n', (17441, 17470), False, 'import torch\n'), ((18904, 18949), 'torch.allclose', 'torch.allclose', (['summed', 'multiplied', 'threshold'], {}), '(summed, multiplied, threshold)\n', (18918, 18949), False, 'import torch\n'), ((19576, 19597), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {}), '(tensor)\n', (19589, 19597), True, 'import horovod.torch as hvd\n'), ((19996, 20017), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {}), '(tensor)\n', (20009, 20017), True, 'import horovod.torch as hvd\n'), ((20642, 20664), 'torch.IntTensor', 'torch.IntTensor', (['*dims'], {}), '(*dims)\n', (20657, 20664), False, 'import torch\n'), ((20700, 20724), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (20717, 20724), False, 'import torch\n'), ((20751, 20772), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {}), '(tensor)\n', (20764, 20772), True, 'import horovod.torch as hvd\n'), ((21148, 21173), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (21171, 21173), False, 'import torch\n'), ((21238, 21280), 'os.environ.get', 'os.environ.get', (['"""HOROVOD_MIXED_INSTALL"""', '(0)'], {}), 
"('HOROVOD_MIXED_INSTALL', 0)\n", (21252, 21280), False, 'import os\n'), ((21757, 21786), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['*dims'], {}), '(*dims)\n', (21779, 21786), False, 'import torch\n'), ((21822, 21846), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (21839, 21846), False, 'import torch\n'), ((21873, 21894), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {}), '(tensor)\n', (21886, 21894), True, 'import horovod.torch as hvd\n'), ((23268, 23291), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (23285, 23291), False, 'import torch\n'), ((23478, 23514), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {'average': '(False)'}), '(tensor, average=False)\n', (23491, 23514), True, 'import horovod.torch as hvd\n'), ((23719, 23754), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (23733, 23754), True, 'import numpy as np\n'), ((24440, 24463), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (24457, 24463), False, 'import torch\n'), ((24650, 24685), 'horovod.torch.allreduce', 'hvd.allreduce', (['tensor'], {'average': '(True)'}), '(tensor, average=True)\n', (24663, 24685), True, 'import horovod.torch as hvd\n'), ((24845, 24864), 'numpy.ones', 'np.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (24852, 24864), True, 'import numpy as np\n'), ((24883, 24918), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (24897, 24918), True, 'import numpy as np\n'), ((25760, 25783), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (25777, 25783), False, 'import torch\n'), ((25980, 26025), 'horovod.torch.grouped_allreduce', 'hvd.grouped_allreduce', (['tensors'], {'average': '(False)'}), '(tensors, average=False)\n', (26001, 26025), True, 'import horovod.torch as hvd\n'), ((27528, 27551), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), 
'(1234)\n', (27545, 27551), False, 'import torch\n'), ((27750, 27794), 'horovod.torch.grouped_allreduce', 'hvd.grouped_allreduce', (['tensors'], {'average': '(True)'}), '(tensors, average=True)\n', (27771, 27794), True, 'import horovod.torch as hvd\n'), ((29245, 29268), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (29262, 29268), False, 'import torch\n'), ((29547, 29593), 'horovod.torch.grouped_allreduce_', 'hvd.grouped_allreduce_', (['tensors'], {'average': '(False)'}), '(tensors, average=False)\n', (29569, 29593), True, 'import horovod.torch as hvd\n'), ((30648, 30673), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (30671, 30673), False, 'import torch\n'), ((30869, 30914), 'horovod.torch.grouped_allreduce', 'hvd.grouped_allreduce', (['tensors'], {'average': '(False)'}), '(tensors, average=False)\n', (30890, 30914), True, 'import horovod.torch as hvd\n'), ((31565, 31588), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (31582, 31588), False, 'import torch\n'), ((31860, 31905), 'horovod.torch.grouped_allreduce', 'hvd.grouped_allreduce', (['tensors'], {'average': '(False)'}), '(tensors, average=False)\n', (31881, 31905), True, 'import horovod.torch as hvd\n'), ((32940, 32963), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (32957, 32963), False, 'import torch\n'), ((33235, 33279), 'horovod.torch.grouped_allreduce', 'hvd.grouped_allreduce', (['tensors'], {'average': '(True)'}), '(tensors, average=True)\n', (33256, 33279), True, 'import horovod.torch as hvd\n'), ((33493, 33512), 'numpy.ones', 'np.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (33500, 33512), True, 'import numpy as np\n'), ((34752, 34773), 'horovod.torch.allgather', 'hvd.allgather', (['tensor'], {}), '(tensor)\n', (34765, 34773), True, 'import horovod.torch as hvd\n'), ((36643, 36664), 'horovod.torch.allgather', 'hvd.allgather', (['tensor'], {}), '(tensor)\n', (36656, 36664), True, 'import 
horovod.torch as hvd\n'), ((38313, 38340), 'horovod.torch.allgather_async', 'hvd.allgather_async', (['tensor'], {}), '(tensor)\n', (38332, 38340), True, 'import horovod.torch as hvd\n'), ((38679, 38702), 'horovod.torch.synchronize', 'hvd.synchronize', (['handle'], {}), '(handle)\n', (38694, 38702), True, 'import horovod.torch as hvd\n'), ((39773, 39794), 'horovod.torch.allgather', 'hvd.allgather', (['tensor'], {}), '(tensor)\n', (39786, 39794), True, 'import horovod.torch as hvd\n'), ((40383, 40412), 'torch.IntTensor', 'torch.IntTensor', (['*tensor_size'], {}), '(*tensor_size)\n', (40398, 40412), False, 'import torch\n'), ((40448, 40479), 'torch.FloatTensor', 'torch.FloatTensor', (['*tensor_size'], {}), '(*tensor_size)\n', (40465, 40479), False, 'import torch\n'), ((40506, 40527), 'horovod.torch.allgather', 'hvd.allgather', (['tensor'], {}), '(tensor)\n', (40519, 40527), True, 'import horovod.torch as hvd\n'), ((42555, 42582), 'torch.cat', 'torch.cat', (['grad_list'], {'dim': '(0)'}), '(grad_list, dim=0)\n', (42564, 42582), False, 'import torch\n'), ((42607, 42628), 'horovod.torch.allgather', 'hvd.allgather', (['tensor'], {}), '(tensor)\n', (42620, 42628), True, 'import horovod.torch as hvd\n'), ((42850, 42885), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (42864, 42885), True, 'import numpy as np\n'), ((44383, 44415), 'horovod.torch.broadcast', 'hvd.broadcast', (['tensor', 'root_rank'], {}), '(tensor, root_rank)\n', (44396, 44415), True, 'import horovod.torch as hvd\n'), ((46174, 46207), 'horovod.torch.broadcast_', 'hvd.broadcast_', (['tensor', 'root_rank'], {}), '(tensor, root_rank)\n', (46188, 46207), True, 'import horovod.torch as hvd\n'), ((47190, 47214), 'horovod.torch.broadcast', 'hvd.broadcast', (['tensor', '(0)'], {}), '(tensor, 0)\n', (47203, 47214), True, 'import horovod.torch as hvd\n'), ((47806, 47835), 'torch.IntTensor', 'torch.IntTensor', (['*tensor_size'], {}), '(*tensor_size)\n', (47821, 47835), 
False, 'import torch\n'), ((47871, 47902), 'torch.FloatTensor', 'torch.FloatTensor', (['*tensor_size'], {}), '(*tensor_size)\n', (47888, 47902), False, 'import torch\n'), ((47929, 47953), 'horovod.torch.broadcast', 'hvd.broadcast', (['tensor', '(0)'], {}), '(tensor, 0)\n', (47942, 47953), True, 'import horovod.torch as hvd\n'), ((48540, 48567), 'horovod.torch.broadcast', 'hvd.broadcast', (['tensor', 'rank'], {}), '(tensor, rank)\n', (48553, 48567), True, 'import horovod.torch as hvd\n'), ((50386, 50418), 'horovod.torch.broadcast', 'hvd.broadcast', (['tensor', 'root_rank'], {}), '(tensor, root_rank)\n', (50399, 50418), True, 'import horovod.torch as hvd\n'), ((50677, 50712), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (50691, 50712), True, 'import numpy as np\n'), ((51164, 51180), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (51178, 51180), True, 'import horovod.torch as hvd\n'), ((52096, 52114), 'torch.Tensor', 'torch.Tensor', (['vals'], {}), '(vals)\n', (52108, 52114), False, 'import torch\n'), ((52275, 52325), 'torch.tensor', 'torch.tensor', (['([rank + 1] * size)'], {'dtype': 'torch.int32'}), '([rank + 1] * size, dtype=torch.int32)\n', (52287, 52325), False, 'import torch\n'), ((52423, 52451), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor', 'splits'], {}), '(tensor, splits)\n', (52435, 52451), True, 'import horovod.torch as hvd\n'), ((53330, 53346), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (53344, 53346), True, 'import horovod.torch as hvd\n'), ((54262, 54280), 'torch.Tensor', 'torch.Tensor', (['vals'], {}), '(vals)\n', (54274, 54280), False, 'import torch\n'), ((54500, 54520), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor'], {}), '(tensor)\n', (54512, 54520), True, 'import horovod.torch as hvd\n'), ((55167, 55192), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (55190, 55192), False, 'import torch\n'), ((55252, 55268), 
'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (55266, 55268), True, 'import horovod.torch as hvd\n'), ((56130, 56148), 'torch.Tensor', 'torch.Tensor', (['vals'], {}), '(vals)\n', (56142, 56148), False, 'import torch\n'), ((56309, 56374), 'torch.tensor', 'torch.tensor', (['([rank + 1] * size)'], {'dtype': 'torch.int32', 'device': '"""cuda"""'}), "([rank + 1] * size, dtype=torch.int32, device='cuda')\n", (56321, 56374), False, 'import torch\n'), ((56472, 56500), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor', 'splits'], {}), '(tensor, splits)\n', (56484, 56500), True, 'import horovod.torch as hvd\n'), ((57647, 57663), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (57661, 57663), True, 'import horovod.torch as hvd\n'), ((57817, 57853), 'torch.empty', 'torch.empty', (['size'], {'dtype': 'torch.int32'}), '(size, dtype=torch.int32)\n', (57828, 57853), False, 'import torch\n'), ((57889, 57927), 'torch.empty', 'torch.empty', (['size'], {'dtype': 'torch.float32'}), '(size, dtype=torch.float32)\n', (57900, 57927), False, 'import torch\n'), ((57953, 57973), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor'], {}), '(tensor)\n', (57965, 57973), True, 'import horovod.torch as hvd\n'), ((58598, 58614), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (58612, 58614), True, 'import horovod.torch as hvd\n'), ((58790, 58810), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor'], {}), '(tensor)\n', (58802, 58810), True, 'import horovod.torch as hvd\n'), ((59273, 59289), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (59287, 59289), True, 'import horovod.torch as hvd\n'), ((59518, 59546), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor', 'splits'], {}), '(tensor, splits)\n', (59530, 59546), True, 'import horovod.torch as hvd\n'), ((59990, 60006), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (60004, 60006), True, 'import horovod.torch as hvd\n'), ((60234, 60262), 
'horovod.torch.alltoall', 'hvd.alltoall', (['tensor', 'splits'], {}), '(tensor, splits)\n', (60246, 60262), True, 'import horovod.torch as hvd\n'), ((60866, 60882), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (60880, 60882), True, 'import horovod.torch as hvd\n'), ((61139, 61159), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor'], {}), '(tensor)\n', (61151, 61159), True, 'import horovod.torch as hvd\n'), ((61534, 61550), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (61548, 61550), True, 'import horovod.torch as hvd\n'), ((62123, 62141), 'torch.Tensor', 'torch.Tensor', (['vals'], {}), '(vals)\n', (62135, 62141), False, 'import torch\n'), ((62394, 62444), 'torch.tensor', 'torch.tensor', (['([rank + 1] * size)'], {'dtype': 'torch.int32'}), '([rank + 1] * size, dtype=torch.int32)\n', (62406, 62444), False, 'import torch\n'), ((62486, 62514), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor', 'splits'], {}), '(tensor, splits)\n', (62498, 62514), True, 'import horovod.torch as hvd\n'), ((62682, 62703), 'numpy.ones', 'np.ones', (['tensor.shape'], {}), '(tensor.shape)\n', (62689, 62703), True, 'import numpy as np\n'), ((62722, 62757), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (62736, 62757), True, 'import numpy as np\n'), ((63227, 63243), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (63241, 63243), True, 'import horovod.torch as hvd\n'), ((63816, 63834), 'torch.Tensor', 'torch.Tensor', (['vals'], {}), '(vals)\n', (63828, 63834), False, 'import torch\n'), ((64090, 64110), 'horovod.torch.alltoall', 'hvd.alltoall', (['tensor'], {}), '(tensor)\n', (64102, 64110), True, 'import horovod.torch as hvd\n'), ((64278, 64299), 'numpy.ones', 'np.ones', (['tensor.shape'], {}), '(tensor.shape)\n', (64285, 64299), True, 'import numpy as np\n'), ((64318, 64353), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (64332, 
64353), True, 'import numpy as np\n'), ((66994, 67035), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred', 'y'], {'size_average': '(False)'}), '(y_pred, y, size_average=False)\n', (67004, 67035), True, 'import torch.nn.functional as F\n'), ((69221, 69274), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (69250, 69274), True, 'import horovod.torch as hvd\n'), ((70241, 70266), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (70264, 70266), False, 'import torch\n'), ((70447, 70500), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.cuda.FloatTensor'], {}), '(torch.cuda.FloatTensor)\n', (70476, 70500), False, 'import torch\n'), ((70570, 70618), 'torch.set_default_tensor_type', 'torch.set_default_tensor_type', (['torch.FloatTensor'], {}), '(torch.FloatTensor)\n', (70599, 70618), False, 'import torch\n'), ((72402, 72443), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred', 'y'], {'size_average': '(False)'}), '(y_pred, y, size_average=False)\n', (72412, 72443), True, 'import torch.nn.functional as F\n'), ((72548, 72601), 'horovod.torch.broadcast_optimizer_state', 'hvd.broadcast_optimizer_state', (['optimizer'], {'root_rank': '(0)'}), '(optimizer, root_rank=0)\n', (72577, 72601), True, 'import horovod.torch as hvd\n'), ((73272, 73313), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred', 'y'], {'size_average': '(False)'}), '(y_pred, y, size_average=False)\n', (73282, 73313), True, 'import torch.nn.functional as F\n'), ((74868, 74878), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (74876, 74878), True, 'import horovod.torch as hvd\n'), ((74891, 74901), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (74899, 74901), True, 'import horovod.torch as hvd\n'), ((75066, 75076), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (75074, 75076), True, 'import horovod.torch as hvd\n'), ((75188, 75198), 
'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (75196, 75198), True, 'import horovod.torch as hvd\n'), ((75587, 75623), 'torch.ones', 'torch.ones', (['tensor_size'], {'dtype': 'dtype'}), '(tensor_size, dtype=dtype)\n', (75597, 75623), False, 'import torch\n'), ((75929, 75949), 'numpy.ones', 'np.ones', (['tensor_size'], {}), '(tensor_size)\n', (75936, 75949), True, 'import numpy as np\n'), ((76132, 76168), 'torch.ones', 'torch.ones', (['tensor_size'], {'dtype': 'dtype'}), '(tensor_size, dtype=dtype)\n', (76142, 76168), False, 'import torch\n'), ((79491, 79516), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (79514, 79516), False, 'import torch\n'), ((79857, 79882), 'torch.cuda.device_count', 'torch.cuda.device_count', ([], {}), '()\n', (79880, 79882), False, 'import torch\n'), ((82734, 82798), 'horovod.torch.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {'named_parameters': 'named_parameters'}), '(opt, named_parameters=named_parameters)\n', (82758, 82798), True, 'import horovod.torch as hvd\n'), ((85376, 85390), 'torch.zeros', 'torch.zeros', (['(1)'], {}), '(1)\n', (85387, 85390), False, 'import torch\n'), ((87054, 87090), 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {'record': '(True)'}), '(record=True)\n', (87077, 87090), False, 'import warnings\n'), ((89696, 89719), 'torch.manual_seed', 'torch.manual_seed', (['(1234)'], {}), '(1234)\n', (89713, 89719), False, 'import torch\n'), ((92331, 92356), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (92354, 92356), False, 'import torch\n'), ((93433, 93458), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (93456, 93458), False, 'import torch\n'), ((93678, 93703), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (93701, 93703), False, 'import torch\n'), ((94401, 94434), 'horovod.torch.SyncBatchNorm', 'hvd.SyncBatchNorm', ([], {'num_features': '(4)'}), '(num_features=4)\n', (94418, 94434), True, 
'import horovod.torch as hvd\n'), ((94496, 94532), 'torch.nn.BatchNorm1d', 'torch.nn.BatchNorm1d', ([], {'num_features': '(4)'}), '(num_features=4)\n', (94516, 94532), False, 'import torch\n'), ((94937, 94997), 'torch.allclose', 'torch.allclose', (['sync_bn.running_mean', 'bn.running_mean', '(1e-06)'], {}), '(sync_bn.running_mean, bn.running_mean, 1e-06)\n', (94951, 94997), False, 'import torch\n'), ((95016, 95074), 'torch.allclose', 'torch.allclose', (['sync_bn.running_var', 'bn.running_var', '(1e-06)'], {}), '(sync_bn.running_var, bn.running_var, 1e-06)\n', (95030, 95074), False, 'import torch\n'), ((96439, 96449), 'common.temppath', 'temppath', ([], {}), '()\n', (96447, 96449), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((96473, 96517), 'horovod.torch.start_timeline', 'hvd.start_timeline', (['fname1'], {'mark_cycles': '(True)'}), '(fname1, mark_cycles=True)\n', (96491, 96517), True, 'import horovod.torch as hvd\n'), ((96844, 96859), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (96854, 96859), False, 'import time\n'), ((96872, 96891), 'horovod.torch.stop_timeline', 'hvd.stop_timeline', ([], {}), '()\n', (96889, 96891), True, 'import horovod.torch as hvd\n'), ((96989, 96999), 'common.temppath', 'temppath', ([], {}), '()\n', (96997, 96999), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((97023, 97067), 'horovod.torch.start_timeline', 'hvd.start_timeline', (['fname2'], {'mark_cycles': '(True)'}), '(fname2, mark_cycles=True)\n', (97041, 97067), True, 'import horovod.torch as hvd\n'), ((97080, 97095), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (97090, 97095), False, 'import time\n'), ((97417, 97432), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (97427, 97432), False, 'import time\n'), ((97445, 97464), 'horovod.torch.stop_timeline', 'hvd.stop_timeline', ([], {}), '()\n', (97462, 97464), True, 'import horovod.torch as hvd\n'), ((97583, 97593), 
'common.temppath', 'temppath', ([], {}), '()\n', (97591, 97593), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((97685, 97730), 'horovod.torch.start_timeline', 'hvd.start_timeline', (['fname3'], {'mark_cycles': '(False)'}), '(fname3, mark_cycles=False)\n', (97703, 97730), True, 'import horovod.torch as hvd\n'), ((97743, 97758), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (97753, 97758), False, 'import time\n'), ((98074, 98093), 'horovod.torch.stop_timeline', 'hvd.stop_timeline', ([], {}), '()\n', (98091, 98093), True, 'import horovod.torch as hvd\n'), ((98230, 98240), 'common.temppath', 'temppath', ([], {}), '()\n', (98238, 98240), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((98332, 98376), 'horovod.torch.start_timeline', 'hvd.start_timeline', (['fname4'], {'mark_cycles': '(True)'}), '(fname4, mark_cycles=True)\n', (98350, 98376), True, 'import horovod.torch as hvd\n'), ((98389, 98404), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (98399, 98404), False, 'import time\n'), ((98726, 98741), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (98736, 98741), False, 'import time\n'), ((98754, 98773), 'horovod.torch.stop_timeline', 'hvd.stop_timeline', ([], {}), '()\n', (98771, 98773), True, 'import horovod.torch as hvd\n'), ((98837, 98847), 'common.temppath', 'temppath', ([], {}), '()\n', (98845, 98847), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((98939, 98984), 'horovod.torch.start_timeline', 'hvd.start_timeline', (['fname5'], {'mark_cycles': '(False)'}), '(fname5, mark_cycles=False)\n', (98957, 98984), True, 'import horovod.torch as hvd\n'), ((98997, 99042), 'horovod.torch.start_timeline', 'hvd.start_timeline', (['fname5'], {'mark_cycles': '(False)'}), '(fname5, mark_cycles=False)\n', (99015, 99042), True, 'import horovod.torch as hvd\n'), ((99055, 99070), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', 
(99065, 99070), False, 'import time\n'), ((99289, 99304), 'time.sleep', 'time.sleep', (['(0.2)'], {}), '(0.2)\n', (99299, 99304), False, 'import time\n'), ((99317, 99336), 'horovod.torch.stop_timeline', 'hvd.stop_timeline', ([], {}), '()\n', (99334, 99336), True, 'import horovod.torch as hvd\n'), ((95540, 95557), 'platform.system', 'platform.system', ([], {}), '()\n', (95555, 95557), False, 'import platform\n'), ((99512, 99529), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(10)'], {}), '(10, 10)\n', (99521, 99529), True, 'import torch.nn as nn\n'), ((99531, 99548), 'torch.nn.Linear', 'nn.Linear', (['(10)', '(10)'], {}), '(10, 10)\n', (99540, 99548), True, 'import torch.nn as nn\n'), ((100057, 100067), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (100065, 100067), True, 'import horovod.torch as hvd\n'), ((101037, 101099), 'horovod.torch.DistributedOptimizer', 'hvd.DistributedOptimizer', (['opt'], {'sparse_as_dense': 'sparse_as_dense'}), '(opt, sparse_as_dense=sparse_as_dense)\n', (101061, 101099), True, 'import horovod.torch as hvd\n'), ((101487, 101505), 'torch.rand', 'torch.rand', (['*shape'], {}), '(*shape)\n', (101497, 101505), False, 'import torch\n'), ((10675, 10691), 'horovod.torch.poll', 'hvd.poll', (['handle'], {}), '(handle)\n', (10683, 10691), True, 'import horovod.torch as hvd\n'), ((12232, 12248), 'horovod.torch.local_size', 'hvd.local_size', ([], {}), '()\n', (12246, 12248), True, 'import horovod.torch as hvd\n'), ((19507, 19531), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (19524, 19531), False, 'import torch\n'), ((19927, 19951), 'torch.FloatTensor', 'torch.FloatTensor', (['*dims'], {}), '(*dims)\n', (19944, 19951), False, 'import torch\n'), ((22579, 22629), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor'], {'name': '"""duplicate_name"""'}), "(tensor, name='duplicate_name')\n", (22598, 22629), True, 'import horovod.torch as hvd\n'), ((23674, 23693), 'numpy.ones', 'np.ones', (['([17] * dim)'], {}), 
'([17] * dim)\n', (23681, 23693), True, 'import numpy as np\n'), ((30761, 30782), 'torch.FloatTensor', 'torch.FloatTensor', (['(10)'], {}), '(10)\n', (30778, 30782), False, 'import torch\n'), ((30797, 30823), 'torch.cuda.FloatTensor', 'torch.cuda.FloatTensor', (['(10)'], {}), '(10)\n', (30819, 30823), False, 'import torch\n'), ((32119, 32138), 'numpy.ones', 'np.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (32126, 32138), True, 'import numpy as np\n'), ((32207, 32242), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (32221, 32242), True, 'import numpy as np\n'), ((33574, 33609), 'numpy.linalg.norm', 'np.linalg.norm', (['(expected - grad_out)'], {}), '(expected - grad_out)\n', (33588, 33609), True, 'import numpy as np\n'), ((38360, 38376), 'horovod.torch.poll', 'hvd.poll', (['handle'], {}), '(handle)\n', (38368, 38376), True, 'import horovod.torch as hvd\n'), ((41212, 41262), 'horovod.torch.allgather_async', 'hvd.allgather_async', (['tensor'], {'name': '"""duplicate_name"""'}), "(tensor, name='duplicate_name')\n", (41231, 41262), True, 'import horovod.torch as hvd\n'), ((42746, 42794), 'numpy.ones', 'np.ones', (['([tensor_sizes[rank]] + [17] * (dim - 1))'], {}), '([tensor_sizes[rank]] + [17] * (dim - 1))\n', (42753, 42794), True, 'import numpy as np\n'), ((48474, 48504), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * 3)'], {}), '(*([17] * 3))\n', (48491, 48504), False, 'import torch\n'), ((49265, 49328), 'horovod.torch.broadcast_async', 'hvd.broadcast_async', (['tensor'], {'root_rank': '(0)', 'name': '"""duplicate_name"""'}), "(tensor, root_rank=0, name='duplicate_name')\n", (49284, 49328), True, 'import horovod.torch as hvd\n'), ((50635, 50654), 'numpy.ones', 'np.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (50642, 50654), True, 'import numpy as np\n'), ((51185, 51201), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (51199, 51201), True, 'import horovod.torch as hvd\n'), ((52218, 
52252), 'torch.cat', 'torch.cat', (['(tensor, tensor)'], {'dim': '(1)'}), '((tensor, tensor), dim=1)\n', (52227, 52252), False, 'import torch\n'), ((53351, 53367), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (53365, 53367), True, 'import horovod.torch as hvd\n'), ((54384, 54418), 'torch.cat', 'torch.cat', (['(tensor, tensor)'], {'dim': '(1)'}), '((tensor, tensor), dim=1)\n', (54393, 54418), False, 'import torch\n'), ((55273, 55289), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (55287, 55289), True, 'import horovod.torch as hvd\n'), ((56252, 56286), 'torch.cat', 'torch.cat', (['(tensor, tensor)'], {'dim': '(1)'}), '((tensor, tensor), dim=1)\n', (56261, 56286), False, 'import torch\n'), ((57668, 57684), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (57682, 57684), True, 'import horovod.torch as hvd\n'), ((58619, 58635), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (58633, 58635), True, 'import horovod.torch as hvd\n'), ((59294, 59310), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (59308, 59310), True, 'import horovod.torch as hvd\n'), ((60011, 60027), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (60025, 60027), True, 'import horovod.torch as hvd\n'), ((60887, 60903), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (60901, 60903), True, 'import horovod.torch as hvd\n'), ((61555, 61571), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (61569, 61571), True, 'import horovod.torch as hvd\n'), ((62245, 62279), 'torch.cat', 'torch.cat', (['(tensor, tensor)'], {'dim': '(1)'}), '((tensor, tensor), dim=1)\n', (62254, 62279), False, 'import torch\n'), ((63248, 63264), 'horovod.torch.nccl_built', 'hvd.nccl_built', ([], {}), '()\n', (63262, 63264), True, 'import horovod.torch as hvd\n'), ((63938, 63972), 'torch.cat', 'torch.cat', (['(tensor, tensor)'], {'dim': '(1)'}), '((tensor, tensor), dim=1)\n', (63947, 63972), False, 'import 
torch\n'), ((64655, 64675), 'torch.randn', 'torch.randn', (['N', 'D_in'], {}), '(N, D_in)\n', (64666, 64675), False, 'import torch\n'), ((64705, 64726), 'torch.randn', 'torch.randn', (['N', 'D_out'], {}), '(N, D_out)\n', (64716, 64726), False, 'import torch\n'), ((65097, 65121), 'torch.nn.Linear', 'torch.nn.Linear', (['D_in', 'H'], {}), '(D_in, H)\n', (65112, 65121), False, 'import torch\n'), ((65139, 65154), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (65152, 65154), False, 'import torch\n'), ((65172, 65197), 'torch.nn.Linear', 'torch.nn.Linear', (['H', 'D_out'], {}), '(H, D_out)\n', (65187, 65197), False, 'import torch\n'), ((66382, 66420), 'torch.optim.Optimizer.__subclasses__', 'torch.optim.Optimizer.__subclasses__', ([], {}), '()\n', (66418, 66420), False, 'import torch\n'), ((67270, 67316), 'horovod.torch.broadcast_', 'hvd.broadcast_', (['model_param_value'], {'root_rank': '(0)'}), '(model_param_value, root_rank=0)\n', (67284, 67316), True, 'import horovod.torch as hvd\n'), ((67516, 67548), 'torch.is_tensor', 'torch.is_tensor', (['opt_param_value'], {}), '(opt_param_value)\n', (67531, 67548), False, 'import torch\n'), ((67900, 67910), 'common.temppath', 'temppath', ([], {}), '()\n', (67908, 67910), False, 'from common import mpi_env_rank_and_size, skip_or_fail_gpu_test, temppath\n'), ((69818, 69850), 'torch.is_tensor', 'torch.is_tensor', (['opt_param_value'], {}), '(opt_param_value)\n', (69833, 69850), False, 'import torch\n'), ((70740, 70760), 'torch.randn', 'torch.randn', (['N', 'D_in'], {}), '(N, D_in)\n', (70751, 70760), False, 'import torch\n'), ((70790, 70811), 'torch.randn', 'torch.randn', (['N', 'D_out'], {}), '(N, D_out)\n', (70801, 70811), False, 'import torch\n'), ((71252, 71276), 'torch.nn.Linear', 'torch.nn.Linear', (['D_in', 'H'], {}), '(D_in, H)\n', (71267, 71276), False, 'import torch\n'), ((71294, 71309), 'torch.nn.ReLU', 'torch.nn.ReLU', ([], {}), '()\n', (71307, 71309), False, 'import torch\n'), ((71327, 71352), 'torch.nn.Linear', 
'torch.nn.Linear', (['H', 'D_out'], {}), '(H, D_out)\n', (71342, 71352), False, 'import torch\n'), ((71996, 72034), 'torch.optim.Optimizer.__subclasses__', 'torch.optim.Optimizer.__subclasses__', ([], {}), '()\n', (72032, 72034), False, 'import torch\n'), ((73671, 73686), 'torch.nn.Parameter', 'nn.Parameter', (['b'], {}), '(b)\n', (73683, 73686), True, 'import torch.nn as nn\n'), ((74655, 74665), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (74663, 74665), True, 'import horovod.torch as hvd\n'), ((76548, 76568), 'numpy.ones', 'np.ones', (['tensor_size'], {}), '(tensor_size)\n', (76555, 76568), True, 'import numpy as np\n'), ((77087, 77107), 'torch.randn', 'torch.randn', (['N', 'D_in'], {}), '(N, D_in)\n', (77098, 77107), False, 'import torch\n'), ((77137, 77158), 'torch.randn', 'torch.randn', (['N', 'D_out'], {}), '(N, D_out)\n', (77148, 77158), False, 'import torch\n'), ((77562, 77586), 'torch.nn.Linear', 'torch.nn.Linear', (['D_in', 'H'], {}), '(D_in, H)\n', (77577, 77586), False, 'import torch\n'), ((77614, 77639), 'torch.nn.Linear', 'torch.nn.Linear', (['H', 'D_out'], {}), '(H, D_out)\n', (77629, 77639), False, 'import torch\n'), ((77667, 77696), 'torch.nn.Linear', 'torch.nn.Linear', (['D_out', 'D_out'], {}), '(D_out, D_out)\n', (77682, 77696), False, 'import torch\n'), ((78445, 78483), 'torch.optim.Optimizer.__subclasses__', 'torch.optim.Optimizer.__subclasses__', ([], {}), '()\n', (78481, 78483), False, 'import torch\n'), ((79097, 79139), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred1', 'y'], {'size_average': '(False)'}), '(y_pred1, y, size_average=False)\n', (79107, 79139), True, 'import torch.nn.functional as F\n'), ((79181, 79223), 'torch.nn.functional.mse_loss', 'F.mse_loss', (['y_pred2', 'y'], {'size_average': '(False)'}), '(y_pred2, y, size_average=False)\n', (79191, 79223), True, 'import torch.nn.functional as F\n'), ((79885, 79901), 'horovod.torch.local_size', 'hvd.local_size', ([], {}), '()\n', (79899, 79901), True, 'import 
horovod.torch as hvd\n'), ((81108, 81125), 'horovod.torch.mpi_enabled', 'hvd.mpi_enabled', ([], {}), '()\n', (81123, 81125), True, 'import horovod.torch as hvd\n'), ((81133, 81158), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (81156, 81158), False, 'import torch\n'), ((85150, 85166), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (85160, 85166), False, 'import torch\n'), ((85196, 85212), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (85206, 85212), False, 'import torch\n'), ((85311, 85328), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (85322, 85328), False, 'import torch\n'), ((86465, 86482), 'torch.zeros', 'torch.zeros', (['(1)', '(1)'], {}), '(1, 1)\n', (86476, 86482), False, 'import torch\n'), ((86512, 86528), 'torch.ones', 'torch.ones', (['(1)', '(1)'], {}), '(1, 1)\n', (86522, 86528), False, 'import torch\n'), ((87574, 87600), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(100)', '(1)'], {}), '(1, 100, 1)\n', (87589, 87600), False, 'import torch\n'), ((87630, 87656), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(100)', '(1)', '(1)'], {}), '(100, 1, 1)\n', (87645, 87656), False, 'import torch\n'), ((88344, 88370), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(100)', '(1)'], {}), '(1, 100, 1)\n', (88359, 88370), False, 'import torch\n'), ((88400, 88426), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(100)', '(1)', '(1)'], {}), '(100, 1, 1)\n', (88415, 88426), False, 'import torch\n'), ((90246, 90306), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor_a'], {'name': '"""tensor_a"""', 'average': '(True)'}), "(tensor_a, name='tensor_a', average=True)\n", (90265, 90306), True, 'import horovod.torch as hvd\n'), ((90334, 90394), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor_b'], {'name': '"""tensor_b"""', 'average': '(True)'}), "(tensor_b, name='tensor_b', average=True)\n", (90353, 90394), True, 'import horovod.torch as hvd\n'), ((90424, 90449), 
'horovod.torch.synchronize', 'hvd.synchronize', (['handle_a'], {}), '(handle_a)\n', (90439, 90449), True, 'import horovod.torch as hvd\n'), ((90479, 90504), 'horovod.torch.synchronize', 'hvd.synchronize', (['handle_b'], {}), '(handle_b)\n', (90494, 90504), True, 'import horovod.torch as hvd\n'), ((90737, 90797), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor_a'], {'name': '"""tensor_a"""', 'average': '(True)'}), "(tensor_a, name='tensor_a', average=True)\n", (90756, 90797), True, 'import horovod.torch as hvd\n'), ((90825, 90885), 'horovod.torch.allreduce_async', 'hvd.allreduce_async', (['tensor_b'], {'name': '"""tensor_b"""', 'average': '(True)'}), "(tensor_b, name='tensor_b', average=True)\n", (90844, 90885), True, 'import horovod.torch as hvd\n'), ((90915, 90940), 'horovod.torch.synchronize', 'hvd.synchronize', (['handle_a'], {}), '(handle_a)\n', (90930, 90940), True, 'import horovod.torch as hvd\n'), ((90970, 90995), 'horovod.torch.synchronize', 'hvd.synchronize', (['handle_b'], {}), '(handle_b)\n', (90985, 90995), True, 'import horovod.torch as hvd\n'), ((92447, 92457), 'horovod.torch.join', 'hvd.join', ([], {}), '()\n', (92455, 92457), True, 'import horovod.torch as hvd\n'), ((92505, 92526), 'horovod.torch.allgather', 'hvd.allgather', (['tensor'], {}), '(tensor)\n', (92518, 92526), True, 'import horovod.torch as hvd\n'), ((92695, 92711), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (92709, 92711), True, 'import horovod.torch as hvd\n'), ((93130, 93146), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (93144, 93146), True, 'import horovod.torch as hvd\n'), ((93216, 93276), 'horovod.torch.broadcast', 'hvd.broadcast', (['tensor', '(1)'], {'name': '"""test_horovod_join_broadcast"""'}), "(tensor, 1, name='test_horovod_join_broadcast')\n", (93229, 93276), True, 'import horovod.torch as hvd\n'), ((93549, 93559), 'horovod.torch.join', 'hvd.join', ([], {}), '()\n', (93557, 93559), True, 'import horovod.torch as 
hvd\n'), ((94460, 94476), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (94474, 94476), True, 'import horovod.torch as hvd\n'), ((94553, 94569), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (94567, 94569), True, 'import horovod.torch as hvd\n'), ((95222, 95284), 'horovod.torch.allreduce', 'hvd.allreduce', (['sync_bn.weight.grad'], {'name': '"""sync_bn.weight.grad"""'}), "(sync_bn.weight.grad, name='sync_bn.weight.grad')\n", (95235, 95284), True, 'import horovod.torch as hvd\n'), ((95343, 95401), 'horovod.torch.allreduce', 'hvd.allreduce', (['sync_bn.bias.grad'], {'name': '"""sync_bn.bias.grad"""'}), "(sync_bn.bias.grad, name='sync_bn.bias.grad')\n", (95356, 95401), True, 'import horovod.torch as hvd\n'), ((95457, 95497), 'horovod.torch.allreduce', 'hvd.allreduce', (['ts1.grad'], {'name': '"""ts1.grad"""'}), "(ts1.grad, name='ts1.grad')\n", (95470, 95497), True, 'import horovod.torch as hvd\n'), ((95745, 95755), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (95753, 95755), True, 'import horovod.torch as hvd\n'), ((100702, 100712), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (100710, 100712), True, 'import horovod.torch as hvd\n'), ((100741, 100787), 'torch.LongTensor', 'torch.LongTensor', (['[[1, 2, 4, 5], [4, 3, 2, 9]]'], {}), '([[1, 2, 4, 5], [4, 3, 2, 9]])\n', (100757, 100787), False, 'import torch\n'), ((100828, 100868), 'torch.LongTensor', 'torch.LongTensor', (['[[1, 3, 4], [4, 7, 9]]'], {}), '([[1, 3, 4], [4, 7, 9]])\n', (100844, 100868), False, 'import torch\n'), ((5765, 5797), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (5782, 5797), False, 'import torch\n'), ((7329, 7361), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (7346, 7361), False, 'import torch\n'), ((8792, 8824), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (8809, 8824), False, 'import torch\n'), ((10484, 10516), 
'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (10501, 10516), False, 'import torch\n'), ((12662, 12694), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (12679, 12694), False, 'import torch\n'), ((14416, 14448), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (14433, 14448), False, 'import torch\n'), ((14738, 14754), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (14752, 14754), True, 'import horovod.torch as hvd\n'), ((17181, 17213), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (17198, 17213), False, 'import torch\n'), ((17504, 17520), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (17518, 17520), True, 'import horovod.torch as hvd\n'), ((23313, 23345), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (23330, 23345), False, 'import torch\n'), ((23564, 23586), 'torch.ones', 'torch.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (23574, 23586), False, 'import torch\n'), ((24485, 24517), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (24502, 24517), False, 'import torch\n'), ((24735, 24757), 'torch.ones', 'torch.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (24745, 24757), False, 'import torch\n'), ((26719, 26752), 'torch.allclose', 'torch.allclose', (['t1', 't2', 'threshold'], {}), '(t1, t2, threshold)\n', (26733, 26752), False, 'import torch\n'), ((28429, 28462), 'torch.allclose', 'torch.allclose', (['t1', 't2', 'threshold'], {}), '(t1, t2, threshold)\n', (28443, 28462), False, 'import torch\n'), ((30232, 30265), 'torch.allclose', 'torch.allclose', (['t1', 't2', 'threshold'], {}), '(t1, t2, threshold)\n', (30246, 30265), False, 'import torch\n'), ((50479, 50501), 'torch.ones', 'torch.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (50489, 50501), False, 'import torch\n'), ((62567, 
62594), 'torch.ones', 'torch.ones', (['collected.shape'], {}), '(collected.shape)\n', (62577, 62594), False, 'import torch\n'), ((64163, 64190), 'torch.ones', 'torch.ones', (['collected.shape'], {}), '(collected.shape)\n', (64173, 64190), False, 'import torch\n'), ((67599, 67643), 'horovod.torch.broadcast_', 'hvd.broadcast_', (['opt_param_value'], {'root_rank': '(0)'}), '(opt_param_value, root_rank=0)\n', (67613, 67643), True, 'import horovod.torch as hvd\n'), ((67704, 67752), 'horovod.torch.broadcast_object', 'hvd.broadcast_object', (['opt_param_value'], {'name': 'name'}), '(opt_param_value, name=name)\n', (67724, 67752), True, 'import horovod.torch as hvd\n'), ((67940, 67950), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (67948, 67950), True, 'import horovod.torch as hvd\n'), ((68143, 68167), 'torch.save', 'torch.save', (['state', 'fname'], {}), '(state, fname)\n', (68153, 68167), False, 'import torch\n'), ((68259, 68269), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (68267, 68269), True, 'import horovod.torch as hvd\n'), ((68309, 68326), 'torch.load', 'torch.load', (['fname'], {}), '(fname)\n', (68319, 68326), False, 'import torch\n'), ((71402, 71412), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (71410, 71412), True, 'import horovod.torch as hvd\n'), ((74454, 74475), 'torch.eq', 'torch.eq', (['grad', 'bgrad'], {}), '(grad, bgrad)\n', (74462, 74475), False, 'import torch\n'), ((75042, 75052), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (75050, 75052), True, 'import horovod.torch as hvd\n'), ((77841, 77852), 'torch.nn.functional.relu', 'F.relu', (['x1_'], {}), '(x1_)\n', (77847, 77852), True, 'import torch.nn.functional as F\n'), ((89948, 89979), 'torch.FloatTensor', 'torch.FloatTensor', (['*([5] * dim)'], {}), '(*([5] * dim))\n', (89965, 89979), False, 'import torch\n'), ((90082, 90114), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (90099, 90114), False, 'import torch\n'), ((90681, 90691), 
'horovod.torch.join', 'hvd.join', ([], {}), '()\n', (90689, 90691), True, 'import horovod.torch as hvd\n'), ((91131, 91141), 'horovod.torch.join', 'hvd.join', ([], {}), '()\n', (91139, 91141), True, 'import horovod.torch as hvd\n'), ((92389, 92405), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (92403, 92405), True, 'import horovod.torch as hvd\n'), ((93491, 93507), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (93505, 93507), True, 'import horovod.torch as hvd\n'), ((93835, 93925), 'torch.tensor', 'torch.tensor', (['[[r, r + 1], [r * 2, r * 2 + 1], [r * 3, r * 3 + 1], [r * 4, r * 4 + 1]]'], {}), '([[r, r + 1], [r * 2, r * 2 + 1], [r * 3, r * 3 + 1], [r * 4, r *\n 4 + 1]])\n', (93847, 93925), False, 'import torch\n'), ((94121, 94183), 'torch.tensor', 'torch.tensor', (['[[r + 1], [r * 2 + 1], [r * 3 + 1], [r * 4 + 1]]'], {}), '([[r + 1], [r * 2 + 1], [r * 3 + 1], [r * 4 + 1]])\n', (94133, 94183), False, 'import torch\n'), ((96238, 96263), 'json.loads', 'json.loads', (['timeline_text'], {}), '(timeline_text)\n', (96248, 96263), False, 'import json\n'), ((100516, 100548), 'torch.nn.Embedding', 'nn.Embedding', (['(10)', '(3)'], {'sparse': '(True)'}), '(10, 3, sparse=True)\n', (100528, 100548), True, 'import torch.nn as nn\n'), ((2470, 2486), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (2484, 2486), True, 'import horovod.torch as hvd\n'), ((14826, 14868), 'os.environ.get', 'os.environ.get', (['"""HOROVOD_MIXED_INSTALL"""', '(0)'], {}), "('HOROVOD_MIXED_INSTALL', 0)\n", (14840, 14868), False, 'import os\n'), ((17592, 17634), 'os.environ.get', 'os.environ.get', (['"""HOROVOD_MIXED_INSTALL"""', '(0)'], {}), "('HOROVOD_MIXED_INSTALL', 0)\n", (17606, 17634), False, 'import os\n'), ((25807, 25839), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (25824, 25839), False, 'import torch\n'), ((27575, 27607), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * 
dim))\n', (27592, 27607), False, 'import torch\n'), ((29292, 29324), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (29309, 29324), False, 'import torch\n'), ((31612, 31644), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (31629, 31644), False, 'import torch\n'), ((31983, 32005), 'torch.ones', 'torch.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (31993, 32005), False, 'import torch\n'), ((32987, 33019), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (33004, 33019), False, 'import torch\n'), ((33357, 33379), 'torch.ones', 'torch.ones', (['([17] * dim)'], {}), '([17] * dim)\n', (33367, 33379), False, 'import torch\n'), ((39695, 39726), 'torch.FloatTensor', 'torch.FloatTensor', (['*tensor_size'], {}), '(*tensor_size)\n', (39712, 39726), False, 'import torch\n'), ((47112, 47143), 'torch.FloatTensor', 'torch.FloatTensor', (['*tensor_size'], {}), '(*tensor_size)\n', (47129, 47143), False, 'import torch\n'), ((80244, 80270), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(100)', '(1)'], {}), '(1, 100, 1)\n', (80259, 80270), False, 'import torch\n'), ((80319, 80345), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(100)', '(1)', '(1)'], {}), '(100, 1, 1)\n', (80334, 80345), False, 'import torch\n'), ((81556, 81582), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(1)', '(100)', '(1)'], {}), '(1, 100, 1)\n', (81571, 81582), False, 'import torch\n'), ((81629, 81655), 'torch.nn.Conv2d', 'torch.nn.Conv2d', (['(100)', '(1)', '(1)'], {}), '(100, 1, 1)\n', (81644, 81655), False, 'import torch\n'), ((90615, 90631), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (90629, 90631), True, 'import horovod.torch as hvd\n'), ((91065, 91081), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (91079, 91081), True, 'import horovod.torch as hvd\n'), ((94597, 94613), 'horovod.torch.local_rank', 'hvd.local_rank', ([], {}), '()\n', (94611, 94613), True, 
'import horovod.torch as hvd\n'), ((96544, 96588), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {'dtype': 'torch.float32'}), '([1, 2, 3], dtype=torch.float32)\n', (96556, 96588), False, 'import torch\n'), ((97122, 97166), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {'dtype': 'torch.float32'}), '([1, 2, 3], dtype=torch.float32)\n', (97134, 97166), False, 'import torch\n'), ((97785, 97829), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {'dtype': 'torch.float32'}), '([1, 2, 3], dtype=torch.float32)\n', (97797, 97829), False, 'import torch\n'), ((98431, 98475), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {'dtype': 'torch.float32'}), '([1, 2, 3], dtype=torch.float32)\n', (98443, 98475), False, 'import torch\n'), ((99097, 99141), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {'dtype': 'torch.float32'}), '([1, 2, 3], dtype=torch.float32)\n', (99109, 99141), False, 'import torch\n'), ((99200, 99244), 'torch.tensor', 'torch.tensor', (['[1, 2, 3]'], {'dtype': 'torch.float32'}), '([1, 2, 3], dtype=torch.float32)\n', (99212, 99244), False, 'import torch\n'), ((34620, 34652), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (34637, 34652), False, 'import torch\n'), ((36465, 36526), 'torch.FloatTensor', 'torch.FloatTensor', (['*([tensor_sizes[rank]] + [17] * (dim - 1))'], {}), '(*([tensor_sizes[rank]] + [17] * (dim - 1)))\n', (36482, 36526), False, 'import torch\n'), ((38183, 38213), 'torch.FloatTensor', 'torch.FloatTensor', (['*rank_shape'], {}), '(*rank_shape)\n', (38200, 38213), False, 'import torch\n'), ((42137, 42198), 'torch.FloatTensor', 'torch.FloatTensor', (['*([tensor_sizes[rank]] + [17] * (dim - 1))'], {}), '(*([tensor_sizes[rank]] + [17] * (dim - 1)))\n', (42154, 42198), False, 'import torch\n'), ((42482, 42519), 'torch.ones', 'torch.ones', (['([size] + [17] * (dim - 1))'], {}), '([size] + [17] * (dim - 1))\n', (42492, 42519), False, 'import torch\n'), ((44091, 44123), 'torch.FloatTensor', 'torch.FloatTensor', 
(['*([17] * dim)'], {}), '(*([17] * dim))\n', (44108, 44123), False, 'import torch\n'), ((44170, 44202), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (44187, 44202), False, 'import torch\n'), ((45882, 45914), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (45899, 45914), False, 'import torch\n'), ((45961, 45993), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (45978, 45993), False, 'import torch\n'), ((50207, 50239), 'torch.FloatTensor', 'torch.FloatTensor', (['*([17] * dim)'], {}), '(*([17] * dim))\n', (50224, 50239), False, 'import torch\n'), ((64890, 64922), 'inspect.getargspec', 'inspect.getargspec', (['cls.__init__'], {}), '(cls.__init__)\n', (64908, 64922), False, 'import inspect\n'), ((71522, 71560), 'inspect.getargspec', 'inspect.getargspec', (['opt_class.__init__'], {}), '(opt_class.__init__)\n', (71540, 71560), False, 'import inspect\n'), ((72695, 72733), 'inspect.getargspec', 'inspect.getargspec', (['opt_class.__init__'], {}), '(opt_class.__init__)\n', (72713, 72733), False, 'import inspect\n'), ((77322, 77354), 'inspect.getargspec', 'inspect.getargspec', (['cls.__init__'], {}), '(cls.__init__)\n', (77340, 77354), False, 'import inspect\n'), ((94051, 94061), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (94059, 94061), True, 'import horovod.torch as hvd\n'), ((94313, 94323), 'horovod.torch.size', 'hvd.size', ([], {}), '()\n', (94321, 94323), True, 'import horovod.torch as hvd\n'), ((94777, 94787), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (94785, 94787), True, 'import horovod.torch as hvd\n'), ((94886, 94896), 'horovod.torch.rank', 'hvd.rank', ([], {}), '()\n', (94894, 94896), True, 'import horovod.torch as hvd\n'), ((66114, 66132), 'torch.is_tensor', 'torch.is_tensor', (['v'], {}), '(v)\n', (66129, 66132), False, 'import torch\n')] |
from ..base import BaseText2Vec
from ....base import catch_vector_errors
from ....doc_utils import ModelDefinition
from ....import_utils import is_all_dependency_installed
from ....models_dict import MODEL_REQUIREMENTS
from datetime import date
# Import the heavy TF/BERT stack only when its optional requirements are
# installed, so the rest of the package stays importable without them.
if is_all_dependency_installed(MODEL_REQUIREMENTS['encoders-text-tfhub-bert']):
    import tensorflow as tf
    import tensorflow_hub as hub
    import bert
    import numpy as np
# Model card / documentation for this encoder, loaded from its markdown file.
BertModelDefinition = ModelDefinition(markdown_filepath='encoders/text/tfhub/bert')
__doc__ = BertModelDefinition.create_docs()
class Bert2Vec(BaseText2Vec):
    """Encode text into fixed-size vectors using a TF-Hub BERT model.

    Sentences are tokenized with the model's bundled WordPiece vocab,
    padded/truncated to ``max_seq_length`` and fed through the hub layer;
    the model's pooled output is returned as plain Python lists.
    """
    definition = BertModelDefinition

    def __init__(self, model_url: str = 'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3', max_seq_length: int = 64, normalize: bool = True):
        """Download/load the hub model and prepare its tokenizer.

        Args:
            model_url: One of the supported TF-Hub BERT model URLs below.
            max_seq_length: Inputs are padded/truncated to this many tokens.
            normalize: Stored on the instance; not used inside this class.
        """
        list_of_urls = [
            'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/2',
            'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/2',
            'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/2',
            'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/2',
            'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/2',
            'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/2',
            'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/2',
            'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/2',
            'https://tfhub.dev/tensorflow/bert_en_uncased_L-24_H-1024_A-16/3',
            'https://tfhub.dev/tensorflow/bert_en_wwm_cased_L-24_H-1024_A-16/3',
            'https://tfhub.dev/tensorflow/bert_en_uncased_L-12_H-768_A-12/3',
            'https://tfhub.dev/tensorflow/bert_en_wwm_uncased_L-24_H-1024_A-16/3',
            'https://tfhub.dev/tensorflow/bert_en_cased_L-24_H-1024_A-16/3',
            'https://tfhub.dev/tensorflow/bert_en_cased_L-12_H-768_A-12/3',
            'https://tfhub.dev/tensorflow/bert_zh_L-12_H-768_A-12/3',
            'https://tfhub.dev/tensorflow/bert_multi_cased_L-12_H-768_A-12/3',
        ]
        self.validate_model_url(model_url, list_of_urls)
        self.max_seq_length = max_seq_length
        self.normalize = normalize
        # Hub BERT v3 modules take a dict of named tensors; v2 modules take a
        # positional list. Assume dict and fall back in init() if it fails.
        self.model_input_type = "dict"
        self.init(model_url)
        self.tokenizer = self.init_tokenizer()

    def init(self, model_url: str):
        """Load the hub layer and probe which input convention it accepts."""
        self.model = hub.KerasLayer(model_url)
        input_word_ids = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32)
        input_mask = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32)
        input_type_ids = tf.keras.layers.Input(shape=(self.max_seq_length,), dtype=tf.int32)
        try:
            self.model(dict(input_word_ids=input_word_ids, input_mask=input_mask, input_type_ids=input_type_ids))
        except ValueError:
            # Older (v2) hub modules reject the dict signature.
            self.model_input_type = "list"
            self.model([input_word_ids, input_mask, input_type_ids])

    def init_tokenizer(self):
        """Build a WordPiece tokenizer from the vocab bundled with the model."""
        self.vocab_file = self.model.resolved_object.vocab_file.asset_path.numpy()
        self.do_lower_case = self.model.resolved_object.do_lower_case.numpy()
        return bert.bert_tokenization.FullTokenizer(self.vocab_file, self.do_lower_case)

    def process(self, input_strings):
        """Convert one string or a list of strings into model inputs.

        Args:
            input_strings: A single string or a list of strings.

        Returns:
            Three ``(batch, max_seq_length)`` int arrays: token ids,
            attention mask, and segment (type) ids (all zeros).
        """
        if isinstance(input_strings, str):
            input_strings = [input_strings]
        input_ids_all, input_mask_all, input_type_ids_all = [], [], []
        for input_string in input_strings:
            # Tokenize and wrap with BERT's special tokens.
            input_tokens = ["[CLS]"] + \
                self.tokenizer.tokenize(input_string) + ["[SEP]"]
            input_ids = self.tokenizer.convert_tokens_to_ids(input_tokens)
            sequence_length = min(len(input_ids), self.max_seq_length)
            # Truncate long inputs, zero-pad short ones.
            if len(input_ids) >= self.max_seq_length:
                # NOTE(review): truncation drops the trailing [SEP] token;
                # kept as-is to preserve the existing behaviour.
                input_ids = input_ids[:self.max_seq_length]
            else:
                input_ids = input_ids + [0] * \
                    (self.max_seq_length - len(input_ids))
            input_mask = [1] * sequence_length + [0] * \
                (self.max_seq_length - sequence_length)
            input_ids_all.append(input_ids)
            input_mask_all.append(input_mask)
            input_type_ids_all.append([0] * self.max_seq_length)
        return np.array(input_ids_all), np.array(input_mask_all), np.array(input_type_ids_all)

    def _pooled_output(self, input_ids, input_mask, input_type_ids):
        """Run one forward pass and return the pooled output as nested lists.

        Shared by encode()/bulk_encode() so the two call conventions
        (positional list vs named dict) live in exactly one place.
        """
        word_ids = tf.convert_to_tensor(input_ids, tf.int32, name="input_word_ids")
        mask = tf.convert_to_tensor(input_mask, tf.int32, name="input_mask")
        type_ids = tf.convert_to_tensor(input_type_ids, tf.int32, name="input_type_ids")
        if self.model_input_type == "list":
            # v2 modules return a tuple whose first element is pooled output.
            pooled = self.model([word_ids, mask, type_ids])[0]
        else:
            pooled = self.model({
                "input_word_ids": word_ids,
                "input_mask": mask,
                "input_type_ids": type_ids,
            })['pooled_output']
        return pooled.numpy().tolist()

    @catch_vector_errors
    def encode(self, text: str):
        """Return the embedding (a list of floats) for a single string."""
        return self._pooled_output(*self.process(text))[0]

    @catch_vector_errors
    def bulk_encode(self, texts: list):
        """Return one embedding per input string (a list of lists)."""
        return self._pooled_output(*self.process(texts))
| [
"tensorflow.keras.layers.Input",
"tensorflow.convert_to_tensor",
"bert.bert_tokenization.FullTokenizer",
"numpy.array",
"tensorflow_hub.KerasLayer"
] | [((2373, 2398), 'tensorflow_hub.KerasLayer', 'hub.KerasLayer', (['model_url'], {}), '(model_url)\n', (2387, 2398), True, 'import tensorflow_hub as hub\n'), ((2424, 2491), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(self.max_seq_length,)', 'dtype': 'tf.int32'}), '(shape=(self.max_seq_length,), dtype=tf.int32)\n', (2445, 2491), True, 'import tensorflow as tf\n'), ((2513, 2580), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(self.max_seq_length,)', 'dtype': 'tf.int32'}), '(shape=(self.max_seq_length,), dtype=tf.int32)\n', (2534, 2580), True, 'import tensorflow as tf\n'), ((2606, 2673), 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(self.max_seq_length,)', 'dtype': 'tf.int32'}), '(shape=(self.max_seq_length,), dtype=tf.int32)\n', (2627, 2673), True, 'import tensorflow as tf\n'), ((3147, 3220), 'bert.bert_tokenization.FullTokenizer', 'bert.bert_tokenization.FullTokenizer', (['self.vocab_file', 'self.do_lower_case'], {}), '(self.vocab_file, self.do_lower_case)\n', (3183, 3220), False, 'import bert\n'), ((4312, 4335), 'numpy.array', 'np.array', (['input_ids_all'], {}), '(input_ids_all)\n', (4320, 4335), True, 'import numpy as np\n'), ((4337, 4361), 'numpy.array', 'np.array', (['input_mask_all'], {}), '(input_mask_all)\n', (4345, 4361), True, 'import numpy as np\n'), ((4364, 4392), 'numpy.array', 'np.array', (['input_type_ids_all'], {}), '(input_type_ids_all)\n', (4372, 4392), True, 'import numpy as np\n'), ((5506, 5570), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_ids', 'tf.int32'], {'name': '"""input_word_ids"""'}), "(input_ids, tf.int32, name='input_word_ids')\n", (5526, 5570), True, 'import tensorflow as tf\n'), ((5589, 5650), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_mask', 'tf.int32'], {'name': '"""input_mask"""'}), "(input_mask, tf.int32, name='input_mask')\n", (5609, 5650), True, 'import tensorflow as tf\n'), ((5669, 5738), 
'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_type_ids', 'tf.int32'], {'name': '"""input_type_ids"""'}), "(input_type_ids, tf.int32, name='input_type_ids')\n", (5689, 5738), True, 'import tensorflow as tf\n'), ((5854, 5918), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_ids', 'tf.int32'], {'name': '"""input_word_ids"""'}), "(input_ids, tf.int32, name='input_word_ids')\n", (5874, 5918), True, 'import tensorflow as tf\n'), ((5951, 6012), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_mask', 'tf.int32'], {'name': '"""input_mask"""'}), "(input_mask, tf.int32, name='input_mask')\n", (5971, 6012), True, 'import tensorflow as tf\n'), ((6049, 6118), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_type_ids', 'tf.int32'], {'name': '"""input_type_ids"""'}), "(input_type_ids, tf.int32, name='input_type_ids')\n", (6069, 6118), True, 'import tensorflow as tf\n'), ((4611, 4675), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_ids', 'tf.int32'], {'name': '"""input_word_ids"""'}), "(input_ids, tf.int32, name='input_word_ids')\n", (4631, 4675), True, 'import tensorflow as tf\n'), ((4694, 4755), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_mask', 'tf.int32'], {'name': '"""input_mask"""'}), "(input_mask, tf.int32, name='input_mask')\n", (4714, 4755), True, 'import tensorflow as tf\n'), ((4774, 4843), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_type_ids', 'tf.int32'], {'name': '"""input_type_ids"""'}), "(input_type_ids, tf.int32, name='input_type_ids')\n", (4794, 4843), True, 'import tensorflow as tf\n'), ((4962, 5026), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_ids', 'tf.int32'], {'name': '"""input_word_ids"""'}), "(input_ids, tf.int32, name='input_word_ids')\n", (4982, 5026), True, 'import tensorflow as tf\n'), ((5059, 5120), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_mask', 'tf.int32'], {'name': 
'"""input_mask"""'}), "(input_mask, tf.int32, name='input_mask')\n", (5079, 5120), True, 'import tensorflow as tf\n'), ((5157, 5226), 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['input_type_ids', 'tf.int32'], {'name': '"""input_type_ids"""'}), "(input_type_ids, tf.int32, name='input_type_ids')\n", (5177, 5226), True, 'import tensorflow as tf\n')] |
import argparse # argsparse是python的命令行解析的标准模块,直接在命令行中就可以向程序中传入参数并让程序运行
import os
import numpy as np
# 用于data augmentation
import torchvision.transforms as transforms
# 保存生成图像
from torchvision.utils import save_image
from torch.utils.data import DataLoader
from torchvision import datasets
# Varibale包含三个属性:
# data:存储了Tensor,是本体的数据
# grad:保存了data的梯度,本是个Variable而非Tensor,与data形状一致
# grad_fn:指向Function对象,用于反向传播的梯度计算之用
from torch.autograd import Variable
import torch.nn as nn
import torch.nn.functional as F
import torch
# 如果根目录下不存在images文件夹,则创建images存放生成图像结果
# Create the output directory for generated sample images.
os.makedirs("images", exist_ok=True)
# Build the command-line argument parser.
parser = argparse.ArgumentParser()
# Defaults: 200 epochs, batch 64, lr 0.0002, Adam betas 0.5/0.999,
# 100-dim latent vector, 28x28 single-channel images, sample every 400 batches.
parser.add_argument("--n_epochs", type=int, default=200, help="number of epochs of training")  # number of training epochs
parser.add_argument("--batch_size", type=int, default=64, help="size of the batches")  # samples per training batch
parser.add_argument("--lr", type=float, default=0.0002, help="adam: learning rate")  # learning rate
parser.add_argument("--b1", type=float, default=0.5, help="adam: decay of first order momentum of gradient")
parser.add_argument("--b2", type=float, default=0.999, help="adam: decay of first order momentum of gradient")
# parser.add_argument("--n_cpu", type=int, default=4, help="number of cpu threads to use during batch generation")  # number of CPU worker threads
parser.add_argument("--latent_dim", type=int, default=100, help="dimensionality of the latent space")  # the generator consumes a 100-dim Gaussian noise vector
parser.add_argument("--img_size", type=int, default=28, help="size of each image dimension")  # height/width of each image
parser.add_argument("--channels", type=int, default=1, help="number of image channels")  # channels per image
parser.add_argument("--sample_interval", type=int, default=400, help="interval betwen image samples")  # batches between saved sample grids
# GPU selection (left disabled by the original author):
# parser.add_argument("--gpu_device", choices=["1", "2", "3"], default="3", help="gpu device number")
# os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpu_device
# Parse the command-line arguments.
opt = parser.parse_args()
print(opt)
# Shape of one input image: (channels, height, width), initially (1, 28, 28).
img_shape = (opt.channels, opt.img_size, opt.img_size)
# Use CUDA only when a GPU is available.
cuda = True if torch.cuda.is_available() else False
# 模型构建两个步骤:①构建子模块;②拼接子模块
class Generator(nn.Module):
    """MLP generator: maps a latent noise vector to a flattened image
    which is reshaped to the module-level ``img_shape``."""

    def __init__(self):
        super(Generator, self).__init__()

        def fc_stack(n_in, n_out, normalize=True):
            # One fully-connected stage: Linear -> (optional BatchNorm1d) -> LeakyReLU.
            stage = [nn.Linear(n_in, n_out)]
            if normalize:
                stage.append(nn.BatchNorm1d(n_out, 0.8))
            stage.append(nn.LeakyReLU(0.2, inplace=True))
            return stage

        widths = [opt.latent_dim, 128, 256, 512, 1024]
        layers = []
        for idx in range(len(widths) - 1):
            # The very first stage skips batch-norm; all later stages use it.
            layers.extend(fc_stack(widths[idx], widths[idx + 1], normalize=(idx != 0)))
        # Final projection: one output per pixel; Tanh keeps values in [-1, 1].
        layers.append(nn.Linear(1024, int(np.prod(img_shape))))
        layers.append(nn.Tanh())
        self.model = nn.Sequential(*layers)

    def forward(self, z):
        """Generate a batch of fake images from the noise batch z."""
        flat = self.model(z)
        # Reshape (batch, prod(img_shape)) back to (batch, *img_shape).
        return flat.view(flat.size(0), *img_shape)
class Discriminator(nn.Module):
    """MLP discriminator: scores each image with a probability of being real."""

    def __init__(self):
        super(Discriminator, self).__init__()
        n_pixels = int(np.prod(img_shape))
        # Shrinking fully-connected stack; Sigmoid emits a scalar real/fake score.
        self.model = nn.Sequential(
            nn.Linear(n_pixels, 512),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(512, 256),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Linear(256, 1),
            nn.Sigmoid(),
        )

    def forward(self, img):
        """Return the validity score for each image in the batch."""
        # Flatten (batch, C, H, W) to (batch, C*H*W) before the linear stack.
        flattened = img.view(img.size(0), -1)
        return self.model(flattened)
# Loss function: binary cross-entropy over the real/fake decision.
adversarial_loss = torch.nn.BCELoss()
# Initialize generator and discriminator (both optimized with Adam below).
generator = Generator()
discriminator = Discriminator()
if cuda:
    # Move networks and loss computation to the GPU.
    generator.cuda()
    discriminator.cuda()
    adversarial_loss.cuda()
# Configure data loader: MNIST is downloaded into ./dataset on first run.
os.makedirs("dataset", exist_ok=True)
# ------------------------------------------
# torch.utils.data.DataLoader
# ------------------------------------------
# Combines a dataset and a sampler, and iterates over mini-batches of the
# training data (optionally with multiple worker processes).
#
# torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=False, sampler=None,
#                             batch_sampler=None, num_workers=0, collate_fn=<function default_collate>,
#                             pin_memory=False, drop_last=False, timeout=0, worker_init_fn=None)
# dataset: dataset to load from
# batch_size: number of samples per batch
# shuffle: default False; when True, reshuffle the data every epoch
# sampler: strategy for drawing examples; if given, shuffle must be False
dataloader = torch.utils.data.DataLoader(
    datasets.MNIST(
        "dataset",
        train=True,
        download=True,
        transform=transforms.Compose(
            [transforms.Resize(opt.img_size), transforms.ToTensor(), transforms.Normalize([0.5], [0.5])]
        ),
    ),
    batch_size=opt.batch_size,
    shuffle=True,
)
# Optimizers
optimizer_G = torch.optim.Adam(generator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
optimizer_D = torch.optim.Adam(discriminator.parameters(), lr=opt.lr, betas=(opt.b1, opt.b2))
Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
# ----------
#  Training loop
# ----------
for epoch in range(opt.n_epochs):
    for i, (imgs, _) in enumerate(dataloader):
        # Adversarial ground truths: 1.0 for real, 0.0 for fake.
        valid = Variable(Tensor(imgs.size(0), 1).fill_(1.0), requires_grad=False)
        fake = Variable(Tensor(imgs.size(0), 1).fill_(0.0), requires_grad=False)
        # Real input images for the discriminator.
        real_imgs = Variable(imgs.type(Tensor))
        # -----------------
        #  Train Generator
        # -----------------
        optimizer_G.zero_grad()
        # Sample a batch of latent noise vectors.
        z = Variable(Tensor(np.random.normal(0, 1, (imgs.shape[0], opt.latent_dim))))
        # Generate a batch of fake images.
        gen_imgs = generator(z)
        # Generator loss: how well the fakes fool the discriminator.
        g_loss = adversarial_loss(discriminator(gen_imgs), valid)
        # Update G.
        g_loss.backward()
        optimizer_G.step()
        # ---------------------
        #  Train Discriminator
        # ---------------------
        optimizer_D.zero_grad()
        # Discriminator loss on the real batch and the detached fake batch.
        real_loss = adversarial_loss(discriminator(real_imgs), valid)
        fake_loss = adversarial_loss(discriminator(gen_imgs.detach()), fake)
        d_loss = (real_loss + fake_loss) / 2
        # Update D.
        d_loss.backward()
        optimizer_D.step()
        print(
            "[Epoch %d/%d] [Batch %d/%d] [D loss: %f] [G loss: %f]"
            % (epoch, opt.n_epochs, i, len(dataloader), d_loss.item(), g_loss.item())
        )
        # Periodically save a 5x5 grid of generated samples.
        batches_done = epoch * len(dataloader) + i
        if batches_done % opt.sample_interval == 0:
            save_image(gen_imgs.data[:25], "images/%d.png" % batches_done, nrow=5, normalize=True)
# Save the trained models (assumes ./model exists -- TODO confirm).
torch.save(generator, './model/G_model.pth')
torch.save(discriminator, './model/D_model.pth')
| [
"numpy.random.normal",
"torch.nn.Sigmoid",
"numpy.prod",
"torch.nn.Tanh",
"argparse.ArgumentParser",
"os.makedirs",
"torch.nn.LeakyReLU",
"torch.nn.BatchNorm1d",
"torch.nn.BCELoss",
"torch.cuda.is_available",
"torch.save",
"torch.nn.Linear",
"torchvision.transforms.Resize",
"torchvision.tr... | [((589, 625), 'os.makedirs', 'os.makedirs', (['"""images"""'], {'exist_ok': '(True)'}), "('images', exist_ok=True)\n", (600, 625), False, 'import os\n'), ((648, 673), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (671, 673), False, 'import argparse\n'), ((4667, 4685), 'torch.nn.BCELoss', 'torch.nn.BCELoss', ([], {}), '()\n', (4683, 4685), False, 'import torch\n'), ((4927, 4964), 'os.makedirs', 'os.makedirs', (['"""dataset"""'], {'exist_ok': '(True)'}), "('dataset', exist_ok=True)\n", (4938, 4964), False, 'import os\n'), ((2298, 2323), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (2321, 2323), False, 'import torch\n'), ((7908, 7952), 'torch.save', 'torch.save', (['generator', '"""./model/G_model.pth"""'], {}), "(generator, './model/G_model.pth')\n", (7918, 7952), False, 'import torch\n'), ((7962, 8010), 'torch.save', 'torch.save', (['discriminator', '"""./model/D_model.pth"""'], {}), "(discriminator, './model/D_model.pth')\n", (7972, 8010), False, 'import torch\n'), ((3406, 3415), 'torch.nn.Tanh', 'nn.Tanh', ([], {}), '()\n', (3413, 3415), True, 'import torch.nn as nn\n'), ((4060, 4091), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4072, 4091), True, 'import torch.nn as nn\n'), ((4106, 4125), 'torch.nn.Linear', 'nn.Linear', (['(512)', '(256)'], {}), '(512, 256)\n', (4115, 4125), True, 'import torch.nn as nn\n'), ((4140, 4171), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (4152, 4171), True, 'import torch.nn as nn\n'), ((4186, 4203), 'torch.nn.Linear', 'nn.Linear', (['(256)', '(1)'], {}), '(256, 1)\n', (4195, 4203), True, 'import torch.nn as nn\n'), ((4263, 4275), 'torch.nn.Sigmoid', 'nn.Sigmoid', ([], {}), '()\n', (4273, 4275), True, 'import torch.nn as nn\n'), ((7794, 7884), 'torchvision.utils.save_image', 'save_image', (['gen_imgs.data[:25]', "('images/%d.png' % batches_done)"], 
{'nrow': '(5)', 'normalize': '(True)'}), "(gen_imgs.data[:25], 'images/%d.png' % batches_done, nrow=5,\n normalize=True)\n", (7804, 7884), False, 'from torchvision.utils import save_image\n'), ((2678, 2706), 'torch.nn.Linear', 'nn.Linear', (['in_feat', 'out_feat'], {}), '(in_feat, out_feat)\n', (2687, 2706), True, 'import torch.nn as nn\n'), ((2907, 2938), 'torch.nn.LeakyReLU', 'nn.LeakyReLU', (['(0.2)'], {'inplace': '(True)'}), '(0.2, inplace=True)\n', (2919, 2938), True, 'import torch.nn as nn\n'), ((6786, 6841), 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(imgs.shape[0], opt.latent_dim)'], {}), '(0, 1, (imgs.shape[0], opt.latent_dim))\n', (6802, 6841), True, 'import numpy as np\n'), ((2814, 2843), 'torch.nn.BatchNorm1d', 'nn.BatchNorm1d', (['out_feat', '(0.8)'], {}), '(out_feat, 0.8)\n', (2828, 2843), True, 'import torch.nn as nn\n'), ((3213, 3231), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (3220, 3231), True, 'import numpy as np\n'), ((3990, 4008), 'numpy.prod', 'np.prod', (['img_shape'], {}), '(img_shape)\n', (3997, 4008), True, 'import numpy as np\n'), ((5797, 5828), 'torchvision.transforms.Resize', 'transforms.Resize', (['opt.img_size'], {}), '(opt.img_size)\n', (5814, 5828), True, 'import torchvision.transforms as transforms\n'), ((5830, 5851), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5849, 5851), True, 'import torchvision.transforms as transforms\n'), ((5853, 5887), 'torchvision.transforms.Normalize', 'transforms.Normalize', (['[0.5]', '[0.5]'], {}), '([0.5], [0.5])\n', (5873, 5887), True, 'import torchvision.transforms as transforms\n')] |
"""Yohkoh SXT Map subclass definitions"""
__author__ = "<NAME>"
__email__ = "<EMAIL>"
import numpy as np
from astropy.visualization import PowerStretch
from astropy.visualization.mpl_normalize import ImageNormalize
from sunpy.map import GenericMap
from sunpy.map.sources.source_type import source_stretch
from sunpy.sun import constants
__all__ = ['SXTMap']
class SXTMap(GenericMap):
    """Yohkoh SXT Image Map

    The Yohkoh Soft X-ray Telescope (SXT) observed the full solar disk
    (42 x 42 arcminutes) in the 0.25 - 4.0 keV range. It combined a
    glancing-incidence mirror with a CCD sensor, and thin metallic filters
    restricted the observed portion of its energy range. SXT resolved
    features down to 2.5 arcseconds; comparing images taken through the
    different filters yielded temperature and density information about
    the emitting plasma. Full images were taken every 2 to 8 seconds,
    and smaller single-filter images as often as every 0.5 seconds.

    Yohkoh was launched on 30 August 1991 and ceased operations on
    14 December 2001.

    References
    ----------
    * `Yohkoh Mission Page <http://solar.physics.montana.edu/sxt/>`_
    * `Fits header reference <http://proba2.oma.be/data/SWAP/level0>`_
    * `Yohkoh Analysis Guide <http://ylstone.physics.montana.edu/ylegacy/yag.html>`_
    """

    def __init__(self, data, header, **kwargs):
        GenericMap.__init__(self, data, header, **kwargs)
        self.meta['detector'] = "SXT"
        self.meta['telescop'] = "Yohkoh"
        # Colormap name encodes the first two letters of the filter in use.
        filter_tag = self.measurement[0:2].lower()
        self.plot_settings['cmap'] = 'yohkohsxt' + filter_tag
        self.plot_settings['norm'] = ImageNormalize(
            stretch=source_stretch(self.meta, PowerStretch(0.5)), clip=False)
        # 2012/12/19 - SXT headers carry no spacecraft-to-Sun distance; the
        # FITS keyword 'DSUN_OBS' appears to hold the observed diameter of
        # the Sun instead. Until a proper value lands in the files, derive
        # the distance via the small-angle approximation from the observed
        # solar radius (stored in arcseconds in SXT FITS files).
        if 'dsun_apparent' not in self.meta:
            self.meta['dsun_apparent'] = constants.au
        if 'solar_r' in self.meta:
            apparent_radius = np.deg2rad(self.meta['solar_r'] / 3600.0)
            self.meta['dsun_apparent'] = constants.radius / apparent_radius

    @property
    def dsun(self):
        """Observer distance; falls back to the small-angle estimate
        computed in ``__init__`` when 'dsun_obs' is not defined."""
        return self.meta.get('dsun_obs', self.meta['dsun_apparent'])

    @property
    def measurement(self):
        """Return the type of data observed (filter name)."""
        wave = self.meta.get('wavelnth', '')
        if wave == 'Al.1':
            return 'Al01'
        if wave.lower() == 'open':
            return 'white-light'
        return wave

    @classmethod
    def is_datasource_for(cls, data, header, **kwargs):
        """Determines if header corresponds to an SXT image."""
        instrument = header.get('instrume')
        return instrument == 'SXT'
| [
"numpy.deg2rad",
"sunpy.map.GenericMap.__init__",
"astropy.visualization.PowerStretch"
] | [((1484, 1533), 'sunpy.map.GenericMap.__init__', 'GenericMap.__init__', (['self', 'data', 'header'], {}), '(self, data, header, **kwargs)\n', (1503, 1533), False, 'from sunpy.map import GenericMap\n'), ((2559, 2600), 'numpy.deg2rad', 'np.deg2rad', (["(self.meta['solar_r'] / 3600.0)"], {}), "(self.meta['solar_r'] / 3600.0)\n", (2569, 2600), True, 'import numpy as np\n'), ((1794, 1811), 'astropy.visualization.PowerStretch', 'PowerStretch', (['(0.5)'], {}), '(0.5)\n', (1806, 1811), False, 'from astropy.visualization import PowerStretch\n')] |
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Ten hand-picked (hue, saturation, value) triples, one per class/label.
hsv_colors = [(0.56823266219239377, 0.82777777777777772, 0.70588235294117652),
              (0.078146611341632088, 0.94509803921568625, 1.0),
              (0.33333333333333331, 0.72499999999999998, 0.62745098039215685),
              (0.99904761904761907, 0.81775700934579443, 0.83921568627450982),
              (0.75387596899224807, 0.45502645502645506, 0.74117647058823533),
              (0.028205128205128216, 0.4642857142857143, 0.5490196078431373),
              (0.8842592592592593, 0.47577092511013214, 0.8901960784313725),
              (0.0, 0.0, 0.49803921568627452),
              (0.16774193548387095, 0.82010582010582012, 0.74117647058823533),
              (0.51539855072463769, 0.88888888888888884, 0.81176470588235294)]
# hsv_to_rgb expects a (..., 3)-shaped array, hence the (10, 1, 3) reshape.
rgb_colors = matplotlib.colors.hsv_to_rgb(np.array(hsv_colors).reshape(10, 1, 3))
# Discrete 10-entry colormap used for scatter coloring.
colors = matplotlib.colors.ListedColormap(rgb_colors.reshape(10, 3))
def plot(step, Y, labels, save_path):
    """Scatter-plot the 2-D projection ``Y`` colored by ``labels`` and save it.

    Parameters
    ----------
    step : int or str
        Training step; used in the title and in the output file name.
    Y : array-like, shape (n_samples, 2)
        2-D coordinates of the projected points.
    labels : array-like
        Per-point values mapped through the module-level ``colors`` colormap.
    save_path : str
        Directory into which ``<step>_step.png`` is written.
    """
    figure = plt.figure()
    plt.scatter(Y[:, 0], Y[:, 1], s=30, c=labels, cmap=colors, linewidth=0)
    plt.colorbar()
    plt.title(f'{step} step projection figure')
    figure.savefig(f'{save_path}/{step}_step.png')
    # Close the figure so repeated calls do not accumulate open figures
    # (matplotlib keeps every figure alive until explicitly closed).
    plt.close(figure)
| [
"matplotlib.pyplot.colorbar",
"numpy.array",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.title"
] | [((1016, 1028), 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), '()\n', (1026, 1028), True, 'import matplotlib.pyplot as plt\n'), ((1033, 1104), 'matplotlib.pyplot.scatter', 'plt.scatter', (['Y[:, 0]', 'Y[:, 1]'], {'s': '(30)', 'c': 'labels', 'cmap': 'colors', 'linewidth': '(0)'}), '(Y[:, 0], Y[:, 1], s=30, c=labels, cmap=colors, linewidth=0)\n', (1044, 1104), True, 'import matplotlib.pyplot as plt\n'), ((1109, 1123), 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), '()\n', (1121, 1123), True, 'import matplotlib.pyplot as plt\n'), ((1128, 1171), 'matplotlib.pyplot.title', 'plt.title', (['f"""{step} step projection figure"""'], {}), "(f'{step} step projection figure')\n", (1137, 1171), True, 'import matplotlib.pyplot as plt\n'), ((854, 874), 'numpy.array', 'np.array', (['hsv_colors'], {}), '(hsv_colors)\n', (862, 874), True, 'import numpy as np\n')] |
#pythran export run(int, int, int)
#runas run(10,10,10)
#from https://raw.githubusercontent.com/cphhpc/numpy/victim_cache/benchmark/Python/shallow_water.py
import numpy as np
def model(height, width, dtype):
    """Return a ``height x width`` field of ones with a 6.0 spike.

    The spike sits at index ``(height // 4, width // 4)``. Floor division
    keeps the indices integral under both Python 2 and Python 3 --
    ``height / 4`` is a float in Python 3 and cannot be used as an index.

    Parameters
    ----------
    height, width : int
        Grid dimensions.
    dtype : numpy dtype
        Element type of the returned array.
    """
    m = np.ones((height, width), dtype=dtype)
    m[height // 4, width // 4] = 6.0
    return m
def step(H, U, V, dt=0.02, dx=1.0, dy=1.0):
    """Advance the shallow-water fields one time step (two half-step scheme).

    Parameters
    ----------
    H : 2-d array
        Water height field (mutated in place and returned).
    U, V : 2-d arrays
        x- and y-momentum fields (mutated in place and returned).
    dt : float
        Time step.
    dx, dy : float
        Grid spacing in x and y.

    Returns
    -------
    (H, U, V) : tuple of 2-d arrays
        The same arrays that were passed in, updated.
    """
    g = 9.80665 # gravitational acceleration
    # Reflecting boundary conditions: copy interior values onto each edge
    # and negate the momentum component normal to that wall.
    H[:,0] = H[:,1] ; U[:,0] = U[:,1] ; V[:,0] = -V[:,1]
    H[:,-1] = H[:,-2] ; U[:,-1] = U[:,-2] ; V[:,-1] = -V[:,-2]
    H[0,:] = H[1,:] ; U[0,:] = -U[1,:] ; V[0,:] = V[1,:]
    H[-1,:] = H[-2,:] ; U[-1,:] = -U[-2,:] ; V[-1,:] = V[-2,:]
    # First half step: mid-point values on cell interfaces.
    # height
    Hx = (H[1:,1:-1]+H[:-1,1:-1])/2 - dt/(2*dx)*(U[1:,1:-1]-U[:-1,1:-1])
    # x momentum
    Ux = (U[1:,1:-1]+U[:-1,1:-1])/2 - \
        dt/(2*dx) * ((U[1:,1:-1]**2/H[1:,1:-1] + g/2*H[1:,1:-1]**2) -
                     (U[:-1,1:-1]**2/H[:-1,1:-1] + g/2*H[:-1,1:-1]**2))
    # y momentum
    Vx = (V[1:,1:-1]+V[:-1,1:-1])/2 - \
        dt/(2*dx) * ((U[1:,1:-1]*V[1:,1:-1]/H[1:,1:-1]) -
                     (U[:-1,1:-1]*V[:-1,1:-1]/H[:-1,1:-1]))
    # height
    Hy = (H[1:-1,1:]+H[1:-1,:-1])/2 - dt/(2*dy)*(V[1:-1,1:]-V[1:-1,:-1])
    # x momentum
    Uy = (U[1:-1,1:]+U[1:-1,:-1])/2 - \
        dt/(2*dy)*((V[1:-1,1:]*U[1:-1,1:]/H[1:-1,1:]) -
                   (V[1:-1,:-1]*U[1:-1,:-1]/H[1:-1,:-1]))
    # y momentum
    Vy = (V[1:-1,1:]+V[1:-1,:-1])/2 - \
        dt/(2*dy)*((V[1:-1,1:]**2/H[1:-1,1:] + g/2*H[1:-1,1:]**2) -
                   (V[1:-1,:-1]**2/H[1:-1,:-1] + g/2*H[1:-1,:-1]**2))
    # Second half step: update the interior of H, U, V in place.
    # height
    H[1:-1,1:-1] -= (dt/dx)*(Ux[1:,:]-Ux[:-1,:]) + (dt/dy)*(Vy[:,1:]-Vy[:,:-1])
    # x momentum
    U[1:-1,1:-1] -= (dt/dx)*((Ux[1:,:]**2/Hx[1:,:] + g/2*Hx[1:,:]**2) -
                             (Ux[:-1,:]**2/Hx[:-1,:] + g/2*Hx[:-1,:]**2)) + \
                    (dt/dy)*((Vy[:,1:] * Uy[:,1:]/Hy[:,1:]) -
                             (Vy[:,:-1] * Uy[:,:-1]/Hy[:,:-1]))
    # y momentum
    V[1:-1,1:-1] -= (dt/dx)*((Ux[1:,:] * Vx[1:,:]/Hx[1:,:]) -
                             (Ux[:-1,:]*Vx[:-1,:]/Hx[:-1,:])) + \
                    (dt/dy)*((Vy[:,1:]**2/Hy[:,1:] + g/2*Hy[:,1:]**2) -
                             (Vy[:,:-1]**2/Hy[:,:-1] + g/2*Hy[:,:-1]**2))
    return (H, U, V)
def simulate(H, timesteps):
    """Advance the height field ``H`` through ``timesteps`` calls of ``step``.

    Velocity fields U and V start at zero. Uses ``range`` instead of the
    original ``xrange``, which no longer exists on Python 3 and raised a
    NameError there; on Python 2 the change only costs one list allocation.
    """
    U = np.zeros_like(H)
    V = np.zeros_like(H)
    for _ in range(timesteps):
        (H, U, V) = step(H, U, V)
    return H
def run(H, W, I):
    """Build an H x W float64 model field and simulate it for I timesteps."""
    initial = model(H, W, dtype=np.float64)
    return simulate(initial, I)
| [
"numpy.zeros_like",
"numpy.ones"
] | [((217, 254), 'numpy.ones', 'np.ones', (['(height, width)'], {'dtype': 'dtype'}), '((height, width), dtype=dtype)\n', (224, 254), True, 'import numpy as np\n'), ((2400, 2416), 'numpy.zeros_like', 'np.zeros_like', (['H'], {}), '(H)\n', (2413, 2416), True, 'import numpy as np\n'), ((2425, 2441), 'numpy.zeros_like', 'np.zeros_like', (['H'], {}), '(H)\n', (2438, 2441), True, 'import numpy as np\n')] |
"""
Working with PSID in python
@author : <NAME> <<EMAIL>>
@date : 2015-02-04 09:02:56
use the read_csv option `usecols` to only keep what we need
"""
# Standard library
import datetime
import gc
import os
import os.path
import re
import zipfile

# Third-party
import lxml.html
import numpy as np
import pandas as pd
import requests
# ----------- #
# Downloading #
# ----------- #
# Define lookup that maps years into request numbers.
# Family files are annual for 1968-1997 and biennial for 1999-2011.
file_year = map(str, list(range(1968, 1998)) + list(range(1999, 2012, 2)))
request_numbers = map(str, ([1056] + list(range(1058, 1083)) +
                            list(range(1047, 1052)) +
                            [1040, 1052, 1132, 1139, 1152, 1156]))
file_lookup = dict(zip(file_year, request_numbers))
# "ind" is the cross-year individual file, not a family year.
file_lookup["ind"] = "1053"
def start_psid_session(user=None, password=None):
    """Log in to the University of Michigan PSID site.

    Parameters
    ----------
    user, password : str
        Credentials for the PSID website account.

    Returns
    -------
    requests session holding the authentication cookies, suitable for
    passing to the download functions.
    """
    login_url = "http://simba.isr.umich.edu/u/Login.aspx"
    sess = requests.session()
    # Fetch the login page to harvest the hidden ASP.NET form fields.
    login_page = sess.get(login_url)
    doc = lxml.html.fromstring(login_page.text)
    validation = doc.xpath('//input[@name="__EVENTVALIDATION"]')[0].attrib['value']
    viewstate = doc.xpath('//input[@name="__VIEWSTATE"]')[0].attrib['value']
    # ASP.NET form payload expected by the login endpoint.
    form = {
        'ctl00$ContentPlaceHolder1$Login1$UserName': user,
        'ctl00$ContentPlaceHolder1$Login1$Password': password,
        'ctl00$ContentPlaceHolder1$Login1$LoginButton': 'Log In',
        '__EVENTTARGET': '',
        '__EVENTARGUMENT': '',
        '__VIEWSTATE': viewstate,
        '__EVENTVALIDATION': validation,
    }
    # Submit the login form.
    sess.post(login_url, data=form)
    # A logged-in account sees a "Logout" link on the data page.
    check = sess.get('http://simba.isr.umich.edu/data/data.aspx')
    logged_in = 'Logout' in str(check.content)
    print('Successful login: %s' % (logged_in,))
    return sess
# Function to download PSID zip file
def download_psid(number, local_filename, session):
    """Stream the PSID zip file identified by ``number`` to ``local_filename``.

    Returns the local file name so the call can be chained.
    """
    base_url = 'http://simba.isr.umich.edu/Zips/GetFile.aspx?file='
    response = session.get(base_url + number, stream=True)
    # Stream in 1 KiB chunks so large archives never sit fully in memory.
    with open(local_filename, 'wb') as out:
        for chunk in response.iter_content(chunk_size=1024):
            if not chunk:
                continue
            out.write(chunk)
            out.flush()
    return local_filename
# Extracting PSID using psid_unzip.
def psid_unzip(filename, extractall=False):
    """Extract a PSID zip archive and return ``(sas_name, ascii_name)``.

    Unless ``extractall`` is True, only ``.sas``, ``.txt`` and ``.pdf``
    members are extracted; ``.pdf`` codebooks go into a separate
    "<dir>Codebooks" directory.

    Returns
    -------
    tuple of (str or None, str or None)
        Names of the ``.sas`` command file and the ``.txt`` ascii data
        file found in the archive (None when no such member exists --
        previously the function raised NameError in that case).
    """
    zfile = zipfile.ZipFile(filename)

    def keep_file(n):
        # Fix: test the argument ``n``, not the loop variable captured by
        # closure as the original did.
        if extractall:
            return True
        return ".sas" in n or ".txt" in n or ".pdf" in n

    nsas = None
    nascii = None
    for name in zfile.namelist():
        # Only take out the files we want
        if keep_file(name):
            (dirname, filename) = os.path.split(name)
            if ".pdf" in name:  # Codebooks go to a separate directory
                dirname = dirname + "Codebooks"
            if ".txt" in name:
                nascii = name  # Keep track of ascii name
            if ".sas" in name:
                nsas = name  # Keep track of sas name
            print("Decompressing %s on %s" % (filename, dirname))
            if dirname != '':
                if not os.path.exists(dirname):
                    os.makedirs(dirname)
            zfile.extract(name, dirname)  # Extract file
    return (nsas, nascii)
def sascii2csv(sas_name, ascii_name, csv_name, remove_orig=True):
    """
    Read in ascii data from SAS commands and write out csv

    Parses the SAS input statements in ``sas_name`` to recover each
    variable's label and its fixed-width column positions, then uses
    numpy to read the fixed-width ascii file and write it back as csv.

    Parameters
    ----------
    sas_name : str
        SAS command file describing the fixed-width layout.
    ascii_name : str
        Fixed-width ascii data file.
    csv_name : str
        Destination csv file.
    remove_orig : bool, optional(default=True)
        Delete the sas and ascii files after conversion.
    """
    # Open sas file
    # NOTE(review): the handle is never closed -- consider a with-block.
    x = open(sas_name, "r")
    dat = x.read()
    dat_split = dat.split('\n')
    # RE for variable designation (first token on the line)
    re_var = "^\s*(?P<variable>\S+)\s+"
    # RE for variable label (quoted LABEL="..." clause)
    re_label = '[(LABEL)(label)]\s*=\s*"(?P<label>[^"]+)"'
    # RE for variable format (FORMAT=... clause)
    re_format = "[(FORMAT)(format)]\s*=\s*(?P<format>\S+)\s"
    # RE for variable position (column range "a - b")
    re_length = "\s*(?P<length1>\d*)\s*-\s*(?P<length2>\d*)\s*"
    meta = []
    for dstr in dat_split:
        res_var = re.search(re_var, dstr)  # Find variable name in line
        res_label = re.search(re_label, dstr)  # Find variable label
        res_format = re.search(re_format, dstr)  # Find variable format
        if not (res_var is None or res_label is None or res_format is None):
            # Now that we have a verified variable name, look up its
            # column positions anywhere in the whole file.
            counts = re.search(res_var.group("variable")+re_length, dat)
            l1 = int(counts.group("length1"))  # Grab out first position
            l2 = int(counts.group("length2"))  # Grab out second position
            # Add to meta data; "l3" is the field width in characters.
            meta += [{"variable": res_var.group("variable"),
                      "label": res_label.group("label"),
                      "format": res_format.group("format"),
                      "l1": l1,
                      "l2": l2,
                      "l3": l2 - l1 + 1}]
    # Get relevant descriptions: column labels and field widths.
    names = [z["label"] for z in meta]
    lengths = [z["l3"] for z in meta]
    del meta
    # Use numpy to read fixed width file and write as .csv
    data = np.genfromtxt(ascii_name, names=names, delimiter=lengths)
    np.savetxt(csv_name, data, delimiter=',',
               header=','.join(data.dtype.names))
    del data
    if remove_orig:
        os.remove(sas_name)
        os.remove(ascii_name)
def download_unzip_csv_psid(f_name, request_num, session, to_csv=True,
                            remove_orig=True, verbose=True):
    """Download one PSID zip file, unzip it, and optionally convert to csv.

    Parameters
    ----------
    f_name : str
        Local zip file name (expected to end in ".zip").
    request_num : str or int
        PSID website request number identifying the file.
    session : requests session
        Authenticated session from ``start_psid_session``.
    to_csv : bool, optional(default=True)
        Convert the extracted ascii data to csv.
    remove_orig : bool, optional(default=True)
        Delete intermediate files after conversion.
    verbose : bool, optional(default=True)
        Print progress messages.
    """
    if verbose:
        print("Downloading %s" % f_name)
    download_psid(str(request_num), f_name, session)
    if verbose:
        print("Unzipping %s" % f_name)
    sas_name, ascii_name = psid_unzip(f_name)
    if to_csv:
        if verbose:
            print("Converting %s to csv" % ascii_name)
        # Fix: the original used f_name.strip(".zip"), but str.strip
        # removes *characters* from the set {., z, i, p} at both ends and
        # would mangle names like "ind2011.zip". Drop the extension instead.
        csv_name = os.path.splitext(f_name)[0] + ".csv"
        sascii2csv(sas_name, ascii_name, csv_name, remove_orig=remove_orig)
    if remove_orig:
        os.remove(f_name)
    gc.collect()
def download_all_family_data(session, to_csv=True, **kwargs):
    """Download every yearly family data set (everything except "ind").

    Bug fix: the original called ``file_lookup.copy().pop("ind").items()``,
    but ``dict.pop`` returns the popped *value* ("1053", a str), so the
    ``.items()`` call raised AttributeError. Iterate the mapping and skip
    the "ind" entry instead.
    """
    for fy, rn in file_lookup.items():
        if fy == "ind":
            continue
        fn = "FAM" + fy + ".zip"
        download_unzip_csv_psid(fn, rn, session, to_csv=to_csv, **kwargs)
    return
def download_ind_cross_year(session, to_csv=True, **kwargs):
    """Download the cross-year individual file (request number 1053)."""
    download_unzip_csv_psid("IND2011ER.zip", "1053", session, to_csv=to_csv,
                            **kwargs)
def download_parentfile(session, to_csv=True, **kwargs):
    """Download the parent identification file (request number 1123)."""
    download_unzip_csv_psid("PID2011ER.zip", "1123", session, to_csv=to_csv,
                            **kwargs)
def download_all_data(session, to_csv=True, **kwargs):
    """Download the cross-year individual file plus all family files.

    Bug fix: ``to_csv`` is now forwarded to the helpers; the original
    hard-coded ``to_csv=True`` and silently ignored the argument.
    """
    download_ind_cross_year(session, to_csv=to_csv, **kwargs)
    download_all_family_data(session, to_csv=to_csv, **kwargs)
    return
# -------- #
# Cleaning #
# -------- #
def clean_indfile_names(df):
    """Collapse PSID individual-file column names like "VAR___68" to "VAR68".

    Most columns in the PSID individual file pad the variable name with a
    run of underscores before a two-digit year; columns without that
    pattern are left unchanged. Collapsing is required before the frame
    can be stored to HDF in table format.

    Bug fix: the cleaned names are assigned to ``df.columns``; the
    original assigned to ``df.cols``, which merely creates an ad-hoc
    attribute and leaves the real column labels untouched.

    Returns the same DataFrame with its columns renamed.
    """
    cols = pd.Series(df.columns, dtype=str)
    parts = cols.str.extract(r"(.+?)__+(\d\d)")
    # Non-matching names produce NaN in both groups; fall back to the original.
    cleaned = (parts[0] + parts[1]).fillna(cols)
    df.columns = cleaned
    return df
def csv2hdf(csv_fn, hdf_fn, hdf_gn=None, hdf_mode="a",
            extra_func=None):
    """
    Move the file csv_fn to an HDF file.

    Parameters
    ----------
    csv_fn : string
        The file name for the csv
    hdf_fn: string
        The name of the hdf file to write to
    hdf_gn: string, optional
        A string specifying the `path` to the group to contain the
        dataset. If none is given, the data set is saved to `/fn`, where
        fn is the root of csv_fn
    hdf_mode: string, optional(default="a")
        The open mode for the hdf file. Default is append
    extra_func: function, optional(default=None)
        An extra function the user can supply to clean or otherwise
        alter the data set after reading in from csv, but before saving
        to hdf

    Returns
    -------
    None

    Notes
    -----
    This function tries to write the data set in table form, but if it
    cannot it will fallback to writing in fixed form.

    For a discussion on the differences see the pandas manual
    """
    df = pd.read_csv(csv_fn)
    if extra_func is not None:
        df = extra_func(df)
    if hdf_gn is None:
        # Default group name: the csv file name without directory or
        # extension (splitext is safer than chopping the last 4 chars).
        hdf_gn = os.path.splitext(os.path.split(csv_fn)[1])[0]
    try:
        df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="table",
                  complib="blosc")
        print("Added %s to %s" % (hdf_gn, hdf_fn))
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        print("WARN: Couldn't store %s as table. Using fixed" % hdf_gn)
        df.to_hdf(hdf_fn, hdf_gn, mode=hdf_mode, format="fixed",
                  complib="blosc")
    return
def _convert_to_4_digit_year(yr):
print("recieved yr: %s" % yr)
if len(yr) == 4:
return yr
if len(yr) == 1:
return "200" + yr
if len(yr) == 3:
raise ValueError("Can't parse three digit year")
iy = int(yr)
if 0 <= iy <= 9: # 200x
return "20" + yr
elif 10 < iy <= int(str(datetime.datetime.now().year)[2:]):
return "20" + yr
else: # assuming in 1900's
return "19" + yr
if __name__ == '__main__':
    import glob
    import argparse
    import datetime
    from textwrap import dedent
    # Help text for the -d/--download option.
    d_help = dedent("""\
    Download the specified data file. If argument begins with a, all files
    will be downloaded. If it begins with i, only the cross-year individual
    file will be downloaded. If it is of the form fYY or fYYYY then only the
    family file for the given year will be downloaded
    """)
    # create parser and add arguments
    parser = argparse.ArgumentParser()
    parser.add_argument("-d", "--download",
                        help=d_help)
    parser.add_argument("--hdf",
                        help="Convert csv files to hdf named PSID.hdf",
                        action="store_true")
    parser.add_argument("-u", "--username",
                        help="Specify username for PSID website")
    parser.add_argument("-p", "--password",
                        help="Specify password for PSID website")
    args = parser.parse_args()
    # Handle download arg
    if args.download:
        # Downloads require credentials for the PSID website.
        if args.username is None or args.password is None:
            msg = dedent("""\
            Must supply username and password. Example syntax:

            `python psid.py -u USERNAME -p PASSWORD -d f75 --hdf`

            If you don't yet have an account, go to http://simba.isr.umich.edu
            and create one
            """)
            raise ValueError(msg)
        a = args.download
        session = start_psid_session(user=args.username,
                                     password=args.password)
        if a.startswith("a"):  # download all
            download_all_data(session)
        elif a.startswith("i"):  # download individual file
            download_ind_cross_year(session, to_csv=True)
        elif a.startswith("p"):  # download parent id file
            download_parentfile(session, to_csv=True)
        else:
            # download single family file: accept "f75", "75", "f1975", ...
            m = re.match("f?(\d+)", a.lower())
            if m is not None:
                yr = m.groups()[0]
                yr = _convert_to_4_digit_year(yr)
                rn = file_lookup[yr]
                fn = "FAM" + yr + ".zip"
                download_unzip_csv_psid(fn, rn, session, to_csv=True)
            else:
                raise ValueError("Could not parse download option")
    # Handle hdf arg
    if args.hdf:
        fnames = glob.glob("./*.csv")  # get csv file names.
        fnames.sort(reverse=True)  # Sorting to put IND file at top
        for f in fnames:
            # NOTE(review): glob returns paths like "./IND2011ER.csv", so
            # startswith("ind") can never match -- probably should test
            # os.path.basename(f).lower() instead; verify before relying
            # on the IND-specific branch.
            if f.lower().startswith("ind"):
                csv2hdf(f, "PSID.hdf", extra_func=clean_indfile_names)
            else:
                csv2hdf(f, "PSID.hdf")
| [
"pandas.Series",
"textwrap.dedent",
"requests.session",
"os.path.exists",
"zipfile.ZipFile",
"pandas.read_csv",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.split",
"os.remove",
"datetime.datetime.now",
"gc.collect",
"numpy.genfromtxt",
"glob.glob",
"re.search"
] | [((998, 1016), 'requests.session', 'requests.session', ([], {}), '()\n', (1014, 1016), False, 'import requests\n'), ((2620, 2645), 'zipfile.ZipFile', 'zipfile.ZipFile', (['filename'], {}), '(filename)\n', (2635, 2645), False, 'import zipfile\n'), ((5272, 5329), 'numpy.genfromtxt', 'np.genfromtxt', (['ascii_name'], {'names': 'names', 'delimiter': 'lengths'}), '(ascii_name, names=names, delimiter=lengths)\n', (5285, 5329), True, 'import numpy as np\n'), ((6269, 6281), 'gc.collect', 'gc.collect', ([], {}), '()\n', (6279, 6281), False, 'import gc\n'), ((7712, 7744), 'pandas.Series', 'pd.Series', (['df.columns'], {'dtype': 'str'}), '(df.columns, dtype=str)\n', (7721, 7744), True, 'import pandas as pd\n'), ((8939, 8958), 'pandas.read_csv', 'pd.read_csv', (['csv_fn'], {}), '(csv_fn)\n', (8950, 8958), True, 'import pandas as pd\n'), ((10104, 10414), 'textwrap.dedent', 'dedent', (['""" Download the specified data file. If argument begins with a, all files\n will be downloaded. If it begins with i, only the cross-year individual\n file will be downloaded. If it is of the form fYY or fYYYY then only the\n family file for the given year will be downloaded\n """'], {}), '(\n """ Download the specified data file. If argument begins with a, all files\n will be downloaded. If it begins with i, only the cross-year individual\n file will be downloaded. 
If it is of the form fYY or fYYYY then only the\n family file for the given year will be downloaded\n """\n )\n', (10110, 10414), False, 'from textwrap import dedent\n'), ((10459, 10484), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (10482, 10484), False, 'import argparse\n'), ((4177, 4200), 're.search', 're.search', (['re_var', 'dstr'], {}), '(re_var, dstr)\n', (4186, 4200), False, 'import re\n'), ((4251, 4276), 're.search', 're.search', (['re_label', 'dstr'], {}), '(re_label, dstr)\n', (4260, 4276), False, 'import re\n'), ((4321, 4347), 're.search', 're.search', (['re_format', 'dstr'], {}), '(re_format, dstr)\n', (4330, 4347), False, 'import re\n'), ((5468, 5487), 'os.remove', 'os.remove', (['sas_name'], {}), '(sas_name)\n', (5477, 5487), False, 'import os\n'), ((5496, 5517), 'os.remove', 'os.remove', (['ascii_name'], {}), '(ascii_name)\n', (5505, 5517), False, 'import os\n'), ((6246, 6263), 'os.remove', 'os.remove', (['f_name'], {}), '(f_name)\n', (6255, 6263), False, 'import os\n'), ((12419, 12439), 'glob.glob', 'glob.glob', (['"""./*.csv"""'], {}), "('./*.csv')\n", (12428, 12439), False, 'import glob\n'), ((2941, 2960), 'os.path.split', 'os.path.split', (['name'], {}), '(name)\n', (2954, 2960), False, 'import os\n'), ((11147, 11420), 'textwrap.dedent', 'dedent', (['""" Must supply username and password. Example syntax:\n\n `python psid.py -u USERNAME -p PASSWORD -d f75 --hdf`\n\n If you don\'t yet have an account, go to http://simba.isr.umich.edu\n and create one\n """'], {}), '(\n """ Must supply username and password. 
Example syntax:\n\n `python psid.py -u USERNAME -p PASSWORD -d f75 --hdf`\n\n If you don\'t yet have an account, go to http://simba.isr.umich.edu\n and create one\n """\n )\n', (11153, 11420), False, 'from textwrap import dedent\n'), ((9130, 9151), 'os.path.split', 'os.path.split', (['csv_fn'], {}), '(csv_fn)\n', (9143, 9151), False, 'import os\n'), ((3373, 3396), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (3387, 3396), False, 'import os\n'), ((3418, 3438), 'os.makedirs', 'os.makedirs', (['dirname'], {}), '(dirname)\n', (3429, 3438), False, 'import os\n'), ((9855, 9878), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (9876, 9878), False, 'import datetime\n')] |
#!/usr/bin/env python
from __future__ import print_function
import numpy as np
import random
random.seed(1337)
np.random.seed(1337) # for reproducibility
from keras.models import Sequential, load_model, save_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution3D, MaxPooling3D
from keras.utils import np_utils
import argparse
import os
from common import load, read_mapping_file
DESCRIPTION = """
Runs a CNN on the offline preprocessed lung data
"""
BATCH_SIZE = 1
NB_CLASSES = 2
NB_EPOCH = 3
# input image dimensions
INPUT_SHAPE = (1, 120, 120, 120)
# number of convolutional filters to use
NB_FILTERS = 32
# size of pooling area for max pooling
POOL_SIZE = (2, 2, 2)
# convolution kernel size
KERNEL_SIZE = (5, 5, 5)
def make_arg_parser():
    """Build the command-line parser for this training script.

    All three options are required string paths: the input folder of
    preprocessed volumes, the sample mapping file, and the model save path.
    """
    arg_parser = argparse.ArgumentParser(description=DESCRIPTION)
    required_path_args = (
        (('-i', '--input'), '<PATH> The input folder'),
        (('-m', '--mapping_file'), '<PATH> To the sample mapping file'),
        (('-s', '--save'), '<Path> to save the model'),
    )
    for flags, help_text in required_path_args:
        arg_parser.add_argument(*flags, help=help_text, type=str, required=True)
    return arg_parser
def generate_training_set(training_set, input_folder):
    """Infinite generator yielding roughly class-balanced training batches.

    Parameters
    ----------
    training_set : pandas.DataFrame
        Must contain an ``id`` column (file stem of the ``<id>.npz`` volume)
        and a binary ``cancer`` label column.
    input_folder : str
        Directory containing the preprocessed ``<id>.npz`` volumes.

    Yields
    ------
    (samples, labels) : tuple of ndarray
        ``samples`` has shape ``(BATCH_SIZE,) + INPUT_SHAPE`` (float32);
        ``labels`` is one-hot encoded with ``NB_CLASSES`` columns.
    """
    batch_samples = np.zeros((BATCH_SIZE,) + INPUT_SHAPE)
    batch_labels = np.zeros(BATCH_SIZE)
    positive_mask = training_set['cancer'] == 1
    while True:
        for i in range(BATCH_SIZE):
            # Oversample positives: draw a cancer case half the time so
            # batches stay balanced regardless of the true class ratio.
            if random.uniform(0, 1) >= .5:
                index = random.choice(training_set[positive_mask].index)
            else:
                index = random.choice(training_set[~positive_mask].index)
            mat_pth = os.path.join(input_folder, "%s.npz" % training_set.loc[index]['id'])
            img = load(mat_pth).astype(np.float32)
            img = img.reshape(1, *INPUT_SHAPE)
            batch_samples[i] = img
            batch_labels[i] = training_set.loc[index]['cancer']
        # Encode into a *separate* array.  The original code reassigned the
        # label buffer itself to the (BATCH_SIZE, NB_CLASSES) one-hot array
        # returned by to_categorical, which corrupted the labels on every
        # pass through the while-loop after the first.
        one_hot = np_utils.to_categorical(batch_labels.astype(int), NB_CLASSES)
        yield (batch_samples.astype(np.float32), one_hot)
def generate_test_set(test_set, input_folder):
    """Infinite generator yielding one (sample, one-hot label) pair per id.

    Parameters
    ----------
    test_set : pandas.DataFrame
        Must contain ``id`` and binary ``cancer`` columns.
    input_folder : str
        Directory containing the preprocessed ``<id>.npz`` volumes.

    Yields
    ------
    (sample, label) : tuple of ndarray
        ``sample`` has shape ``(1,) + INPUT_SHAPE`` (float32); ``label``
        is a one-hot row with ``NB_CLASSES`` columns.
    """
    # Note: the leftover per-sample debug prints from the original version
    # have been removed; they flooded stdout during evaluation.
    while True:
        for _, row in test_set.iterrows():
            mat_pth = os.path.join(input_folder, "%s.npz" % row['id'])
            sample = load(mat_pth).astype(np.float32)
            sample = sample.reshape(1, *INPUT_SHAPE)
            label = np.zeros(1)
            label[0] = row['cancer']
            label = np_utils.to_categorical(label.astype(int), NB_CLASSES)
            yield (sample, label)
def build_network():
    """Assemble and compile the 3-D CNN classifier.

    Two Convolution3D + ReLU + MaxPooling3D stages over the input volume,
    followed by a small dense head ending in a softmax over ``NB_CLASSES``
    outputs.  Compiled with categorical cross-entropy loss and the Adadelta
    optimizer.  (Uses the Keras 1.x ``border_mode`` API.)

    Returns
    -------
    keras.models.Sequential
        The compiled, untrained model.
    """
    model = Sequential()
    # First conv stage: NB_FILTERS kernels of KERNEL_SIZE over the raw volume.
    model.add(Convolution3D(NB_FILTERS, KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2],
                            border_mode='valid',
                            input_shape=INPUT_SHAPE))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=POOL_SIZE))
    # Second conv stage (shape now inferred from the previous layer).
    model.add(Convolution3D(NB_FILTERS, KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2]))
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=POOL_SIZE))
    model.add(Dropout(0.25))
    # Dense classification head.
    model.add(Flatten())
    model.add(Dense(8))
    model.add(Activation('relu'))
    model.add(Dropout(0.5))
    model.add(Dense(NB_CLASSES))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer='adadelta',
                  metrics=['accuracy'])
    print(model.summary())
    return model
# Driver function: parses CLI args, builds/loads the model, trains, evaluates.
def main():
    """Train (or resume training) the CNN and report test metrics."""
    parser = make_arg_parser()
    args = parser.parse_args()
    df_mapping = read_mapping_file(args.mapping_file)
    infiles = os.listdir(args.input)
    # Strip the ".npz" extension so file stems match the mapping's 'id' column.
    infiles = [infile[:-4] for infile in infiles]
    all_files = df_mapping[df_mapping['id'].isin(infiles)]
    # Random ~90/10 train/test split over the available samples.
    training_mask = np.zeros((all_files.shape[0]))
    training_mask[np.random.uniform(0, 1, all_files.shape[0]) <= .9] = 1
    training_mask = training_mask.astype(bool)
    # Resume from a previously saved model if one exists at the save path.
    if os.path.exists(args.save):
        model = load_model(args.save)
    else:
        model = build_network()
    model.fit_generator(generate_training_set(all_files[training_mask], args.input), int(len(infiles)/BATCH_SIZE), NB_EPOCH)
    save_model(model, args.save, overwrite=True)
    # Evaluate and print predictions on the held-out ~10% split.
    score = model.evaluate_generator(generate_test_set(all_files[~training_mask], args.input), 250)
    print(model.predict_generator(generate_test_set(all_files[~training_mask], args.input), 250))
    print('Test score:', score[0])
    print('Test accuracy:', score[1])
# Used for thread safety
if __name__ == '__main__':
main()
| [
"keras.layers.Activation",
"keras.models.save_model",
"keras.layers.Dense",
"os.path.exists",
"os.listdir",
"argparse.ArgumentParser",
"numpy.random.seed",
"random.uniform",
"random.choice",
"keras.layers.Flatten",
"keras.models.Sequential",
"keras.layers.Convolution3D",
"keras.layers.Dropou... | [((94, 111), 'random.seed', 'random.seed', (['(1337)'], {}), '(1337)\n', (105, 111), False, 'import random\n'), ((112, 132), 'numpy.random.seed', 'np.random.seed', (['(1337)'], {}), '(1337)\n', (126, 132), True, 'import numpy as np\n'), ((815, 863), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': 'DESCRIPTION'}), '(description=DESCRIPTION)\n', (838, 863), False, 'import argparse\n'), ((1270, 1360), 'numpy.zeros', 'np.zeros', (['(BATCH_SIZE, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2], INPUT_SHAPE[3])'], {}), '((BATCH_SIZE, INPUT_SHAPE[0], INPUT_SHAPE[1], INPUT_SHAPE[2],\n INPUT_SHAPE[3]))\n', (1278, 1360), True, 'import numpy as np\n'), ((1378, 1398), 'numpy.zeros', 'np.zeros', (['BATCH_SIZE'], {}), '(BATCH_SIZE)\n', (1386, 1398), True, 'import numpy as np\n'), ((2800, 2812), 'keras.models.Sequential', 'Sequential', ([], {}), '()\n', (2810, 2812), False, 'from keras.models import Sequential, load_model, save_model\n'), ((3768, 3804), 'common.read_mapping_file', 'read_mapping_file', (['args.mapping_file'], {}), '(args.mapping_file)\n', (3785, 3804), False, 'from common import load, read_mapping_file\n'), ((3820, 3842), 'os.listdir', 'os.listdir', (['args.input'], {}), '(args.input)\n', (3830, 3842), False, 'import os\n'), ((3974, 4002), 'numpy.zeros', 'np.zeros', (['all_files.shape[0]'], {}), '(all_files.shape[0])\n', (3982, 4002), True, 'import numpy as np\n'), ((4134, 4159), 'os.path.exists', 'os.path.exists', (['args.save'], {}), '(args.save)\n', (4148, 4159), False, 'import os\n'), ((2828, 2951), 'keras.layers.Convolution3D', 'Convolution3D', (['NB_FILTERS', 'KERNEL_SIZE[0]', 'KERNEL_SIZE[1]', 'KERNEL_SIZE[2]'], {'border_mode': '"""valid"""', 'input_shape': 'INPUT_SHAPE'}), "(NB_FILTERS, KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2],\n border_mode='valid', input_shape=INPUT_SHAPE)\n", (2841, 2951), False, 'from keras.layers import Convolution3D, MaxPooling3D\n'), ((3019, 3037), 'keras.layers.Activation', 
'Activation', (['"""relu"""'], {}), "('relu')\n", (3029, 3037), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3053, 3086), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': 'POOL_SIZE'}), '(pool_size=POOL_SIZE)\n', (3065, 3086), False, 'from keras.layers import Convolution3D, MaxPooling3D\n'), ((3102, 3175), 'keras.layers.Convolution3D', 'Convolution3D', (['NB_FILTERS', 'KERNEL_SIZE[0]', 'KERNEL_SIZE[1]', 'KERNEL_SIZE[2]'], {}), '(NB_FILTERS, KERNEL_SIZE[0], KERNEL_SIZE[1], KERNEL_SIZE[2])\n', (3115, 3175), False, 'from keras.layers import Convolution3D, MaxPooling3D\n'), ((3191, 3209), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3201, 3209), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3225, 3258), 'keras.layers.MaxPooling3D', 'MaxPooling3D', ([], {'pool_size': 'POOL_SIZE'}), '(pool_size=POOL_SIZE)\n', (3237, 3258), False, 'from keras.layers import Convolution3D, MaxPooling3D\n'), ((3274, 3287), 'keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (3281, 3287), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3304, 3313), 'keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (3311, 3313), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3329, 3337), 'keras.layers.Dense', 'Dense', (['(8)'], {}), '(8)\n', (3334, 3337), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3353, 3371), 'keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (3363, 3371), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3387, 3399), 'keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (3394, 3399), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((3415, 3432), 'keras.layers.Dense', 'Dense', (['NB_CLASSES'], {}), '(NB_CLASSES)\n', (3420, 3432), False, 'from keras.layers import Dense, Dropout, Activation, 
Flatten\n'), ((3448, 3469), 'keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (3458, 3469), False, 'from keras.layers import Dense, Dropout, Activation, Flatten\n'), ((4177, 4198), 'keras.models.load_model', 'load_model', (['args.save'], {}), '(args.save)\n', (4187, 4198), False, 'from keras.models import Sequential, load_model, save_model\n'), ((4378, 4422), 'keras.models.save_model', 'save_model', (['model', 'args.save'], {'overwrite': '(True)'}), '(model, args.save, overwrite=True)\n', (4388, 4422), False, 'from keras.models import Sequential, load_model, save_model\n'), ((1751, 1818), 'os.path.join', 'os.path.join', (['input_folder', "('%s.npz' % training_set.ix[index]['id'])"], {}), "(input_folder, '%s.npz' % training_set.ix[index]['id'])\n", (1763, 1818), False, 'import os\n'), ((2376, 2424), 'os.path.join', 'os.path.join', (['input_folder', "('%s.npz' % row['id'])"], {}), "(input_folder, '%s.npz' % row['id'])\n", (2388, 2424), False, 'import os\n'), ((2554, 2565), 'numpy.zeros', 'np.zeros', (['(1)'], {}), '(1)\n', (2562, 2565), True, 'import numpy as np\n'), ((4024, 4067), 'numpy.random.uniform', 'np.random.uniform', (['(0)', '(1)', 'all_files.shape[0]'], {}), '(0, 1, all_files.shape[0])\n', (4041, 4067), True, 'import numpy as np\n'), ((1530, 1550), 'random.uniform', 'random.uniform', (['(0)', '(1)'], {}), '(0, 1)\n', (1544, 1550), False, 'import random\n'), ((1582, 1633), 'random.choice', 'random.choice', (['training_set[true_postive_set].index'], {}), '(training_set[true_postive_set].index)\n', (1595, 1633), False, 'import random\n'), ((1676, 1728), 'random.choice', 'random.choice', (['training_set[~true_postive_set].index'], {}), '(training_set[~true_postive_set].index)\n', (1689, 1728), False, 'import random\n'), ((1837, 1850), 'common.load', 'load', (['mat_pth'], {}), '(mat_pth)\n', (1841, 1850), False, 'from common import load, read_mapping_file\n'), ((2446, 2459), 'common.load', 'load', (['mat_pth'], {}), 
'(mat_pth)\n', (2450, 2459), False, 'from common import load, read_mapping_file\n')] |
import os
from collections.abc import Iterable
from functools import partial
from math import ceil
from operator import getitem
from threading import Lock
from typing import Optional, Union
import numpy as np
import pandas as pd
import dask.array as da
from dask.base import tokenize
from dask.blockwise import BlockwiseDepDict, blockwise
from dask.dataframe.core import (
DataFrame,
Index,
Series,
_concat,
_emulate,
apply_and_enforce,
has_parallel_type,
new_dd_object,
)
from dask.dataframe.io.utils import DataFrameIOFunction
from dask.dataframe.shuffle import set_partition
from dask.dataframe.utils import (
check_meta,
insert_meta_param_description,
is_series_like,
make_meta,
)
from dask.delayed import delayed
from dask.highlevelgraph import HighLevelGraph
from dask.layers import DataFrameIOLayer
from dask.utils import M, _deprecated, funcname, is_arraylike
lock = Lock()
def _meta_from_array(x, columns=None, index=None, meta=None):
    """Create an empty DataFrame or Series with the correct dtype for ``x``.

    Dispatches on the array's dtype and rank: a structured (record) dtype
    yields one column per field; a 1-d array yields a Series (or a single
    column DataFrame when ``columns`` is a one-element list); a 2-d array
    yields one column per array column.  ``meta`` supplies the concrete
    dataframe constructor (defaults to pandas).
    """
    if x.ndim > 2:
        raise ValueError(
            "from_array does not input more than 2D array, got"
            " array with shape %r" % (x.shape,)
        )
    if index is not None:
        # ``index`` must be a dask Index collection; we only keep its meta here.
        if not isinstance(index, Index):
            raise ValueError("'index' must be an instance of dask.dataframe.Index")
        index = index._meta
    if meta is None:
        meta = pd.DataFrame()
    if getattr(x.dtype, "names", None) is not None:
        # record array has named columns
        if columns is None:
            columns = list(x.dtype.names)
        elif np.isscalar(columns):
            raise ValueError("For a struct dtype, columns must be a list.")
        elif not all(i in x.dtype.names for i in columns):
            extra = sorted(set(columns).difference(x.dtype.names))
            raise ValueError(f"dtype {x.dtype} doesn't have fields {extra}")
        fields = x.dtype.fields
        # Fall back to float64 for any requested column absent from the dtype.
        dtypes = [fields[n][0] if n in fields else "f8" for n in columns]
    elif x.ndim == 1:
        # 1-d input: produce a Series (``_constructor_sliced``) unless the
        # caller asked for a single-column DataFrame via a one-element list.
        if np.isscalar(columns) or columns is None:
            return meta._constructor_sliced(
                [], name=columns, dtype=x.dtype, index=index
            )
        elif len(columns) == 1:
            return meta._constructor(
                np.array([], dtype=x.dtype), columns=columns, index=index
            )
        raise ValueError(
            "For a 1d array, columns must be a scalar or single element list"
        )
    else:
        # 2-d input: one output column per array column, all sharing x.dtype.
        if np.isnan(x.shape[1]):
            raise ValueError("Shape along axis 1 must be known")
        if columns is None:
            columns = list(range(x.shape[1])) if x.ndim == 2 else [0]
        elif len(columns) != x.shape[1]:
            raise ValueError(
                "Number of column names must match width of the array. "
                f"Got {len(columns)} names for {x.shape[1]} columns"
            )
        dtypes = [x.dtype] * len(columns)
    data = {c: np.array([], dtype=dt) for (c, dt) in zip(columns, dtypes)}
    return meta._constructor(data, columns=columns, index=index)
def from_array(x, chunksize=50000, columns=None, meta=None):
    """Read any sliceable array into a Dask Dataframe

    Uses getitem syntax to pull slices out of the array. The array need not be
    a NumPy array but must support slicing syntax

        x[50000:100000]

    and have 2 dimensions:

        x.ndim == 2

    or have a record dtype:

        x.dtype == [('name', 'O'), ('balance', 'i8')]

    Parameters
    ----------
    x : array_like
    chunksize : int, optional
        The number of rows per partition to use.
    columns : list or string, optional
        list of column names if DataFrame, single string if Series
    meta : object, optional
        An optional `meta` parameter can be passed for dask
        to specify the concrete dataframe type to use for partitions of
        the Dask dataframe. By default, pandas DataFrame is used.

    Returns
    -------
    dask.DataFrame or dask.Series
        A dask DataFrame/Series
    """
    # Dask arrays carry their own graph; use the dedicated conversion path.
    if isinstance(x, da.Array):
        return from_dask_array(x, columns=columns, meta=meta)
    meta = _meta_from_array(x, columns, meta=meta)
    # Divisions are the partition start offsets plus the last valid index.
    divisions = tuple(range(0, len(x), chunksize))
    divisions = divisions + (len(x) - 1,)
    token = tokenize(x, chunksize, columns)
    name = "from_array-" + token
    dsk = {}
    # One task per chunk: slice the array, then wrap the slice in the
    # partition type (Series tasks also need dtype and name).
    for i in range(0, int(ceil(len(x) / chunksize))):
        data = (getitem, x, slice(i * chunksize, (i + 1) * chunksize))
        if is_series_like(meta):
            dsk[name, i] = (type(meta), data, None, meta.dtype, meta.name)
        else:
            dsk[name, i] = (type(meta), data, None, meta.columns)
    return new_dd_object(dsk, name, meta, divisions)
def from_pandas(
    data: Union[pd.DataFrame, pd.Series],
    npartitions: Optional[int] = None,
    chunksize: Optional[int] = None,
    sort: bool = True,
    name: Optional[str] = None,
) -> DataFrame:
    """
    Construct a Dask DataFrame from a Pandas DataFrame

    This splits an in-memory Pandas dataframe into several parts and constructs
    a dask.dataframe from those parts on which Dask.dataframe can operate in
    parallel. By default, the input dataframe will be sorted by the index to
    produce cleanly-divided partitions (with known divisions). To preserve the
    input ordering, make sure the input index is monotonically-increasing. The
    ``sort=False`` option will also avoid reordering, but will not result in
    known divisions.

    Note that, despite parallelism, Dask.dataframe may not always be faster
    than Pandas. We recommend that you stay with Pandas for as long as
    possible before switching to Dask.dataframe.

    Parameters
    ----------
    data : pandas.DataFrame or pandas.Series
        The DataFrame/Series with which to construct a Dask DataFrame/Series
    npartitions : int, optional
        The number of partitions of the index to create. Note that depending on
        the size and index of the dataframe, the output may have fewer
        partitions than requested.
    chunksize : int, optional
        The number of rows per index partition to use.
    sort: bool
        Sort the input by index first to obtain cleanly divided partitions
        (with known divisions). If False, the input will not be sorted, and
        all divisions will be set to None. Default is True.
    name: string, optional
        An optional keyname for the dataframe. Defaults to hashing the input

    Returns
    -------
    dask.DataFrame or dask.Series
        A dask DataFrame/Series partitioned along the index

    Examples
    --------
    >>> from dask.dataframe import from_pandas
    >>> df = pd.DataFrame(dict(a=list('aabbcc'), b=list(range(6))),
    ...                   index=pd.date_range(start='20100101', periods=6))
    >>> ddf = from_pandas(df, npartitions=3)
    >>> ddf.divisions  # doctest: +NORMALIZE_WHITESPACE
    (Timestamp('2010-01-01 00:00:00', freq='D'),
     Timestamp('2010-01-03 00:00:00', freq='D'),
     Timestamp('2010-01-05 00:00:00', freq='D'),
     Timestamp('2010-01-06 00:00:00', freq='D'))
    >>> ddf = from_pandas(df.a, npartitions=3)  # Works with Series too!
    >>> ddf.divisions  # doctest: +NORMALIZE_WHITESPACE
    (Timestamp('2010-01-01 00:00:00', freq='D'),
     Timestamp('2010-01-03 00:00:00', freq='D'),
     Timestamp('2010-01-05 00:00:00', freq='D'),
     Timestamp('2010-01-06 00:00:00', freq='D'))

    Raises
    ------
    TypeError
        If something other than a ``pandas.DataFrame`` or ``pandas.Series`` is
        passed in.

    See Also
    --------
    from_array : Construct a dask.DataFrame from an array that has record dtype
    read_csv : Construct a dask.DataFrame from a CSV file
    """
    if isinstance(getattr(data, "index", None), pd.MultiIndex):
        raise NotImplementedError("Dask does not support MultiIndex Dataframes.")
    if not has_parallel_type(data):
        raise TypeError("Input must be a pandas DataFrame or Series.")
    # Exactly one of npartitions/chunksize may be given; remember which.
    if (npartitions is None) == (none_chunksize := (chunksize is None)):
        raise ValueError("Exactly one of npartitions and chunksize must be specified.")
    nrows = len(data)
    if none_chunksize:
        if not isinstance(npartitions, int):
            raise TypeError(
                "Please provide npartitions as an int, or possibly as None if you specify chunksize."
            )
        chunksize = int(ceil(nrows / npartitions))
    elif not isinstance(chunksize, int):
        raise TypeError(
            "Please provide chunksize as an int, or possibly as None if you specify npartitions."
        )
    name = name or ("from_pandas-" + tokenize(data, chunksize))
    # Empty input: a single empty partition with unknown divisions.
    if not nrows:
        return new_dd_object({(name, 0): data}, name, data, [None, None])
    if data.index.isna().any() and not data.index.is_numeric():
        raise NotImplementedError(
            "Index in passed data is non-numeric and contains nulls, which Dask does not entirely support.\n"
            "Consider passing `data.loc[~data.isna()]` instead."
        )
    if sort:
        # Sorting gives known divisions; duplicates never straddle partitions.
        if not data.index.is_monotonic_increasing:
            data = data.sort_index(ascending=True)
        divisions, locations = sorted_division_locations(
            data.index, chunksize=chunksize
        )
    else:
        # Unsorted path: fixed-size slices, divisions unknown (all None).
        locations = list(range(0, nrows, chunksize)) + [len(data)]
        divisions = [None] * len(locations)
    dsk = {
        (name, i): data.iloc[start:stop]
        for i, (start, stop) in enumerate(zip(locations[:-1], locations[1:]))
    }
    return new_dd_object(dsk, name, data, divisions)
@_deprecated(after_version="2022.02.1")
def from_bcolz(x, chunksize=None, categorize=True, index=None, lock=lock, **kwargs):
    """Read BColz CTable into a Dask Dataframe

    BColz is a fast on-disk compressed column store with careful attention
    given to compression. https://bcolz.readthedocs.io/en/latest/

    Parameters
    ----------
    x : bcolz.ctable
    chunksize : int, optional
        The size(rows) of blocks to pull out from ctable.
    categorize : bool, defaults to True
        Automatically categorize all string dtypes
    index : string, optional
        Column to make the index
    lock: bool or Lock
        Lock to use when reading or False for no lock (not-thread-safe)

    See Also
    --------
    from_array: more generic function not optimized for bcolz
    """
    if lock is True:
        lock = Lock()
    import bcolz
    import dask.array as da
    # A string argument is taken as an on-disk ctable root directory.
    if isinstance(x, str):
        x = bcolz.ctable(rootdir=x)
    # Default the chunksize to bcolz's native chunk length when it is large.
    bc_chunklen = max(x[name].chunklen for name in x.names)
    if chunksize is None and bc_chunklen > 10000:
        chunksize = bc_chunklen
    categories = dict()
    if categorize:
        # Precompute the category values for every string/object column.
        for name in x.names:
            if (
                np.issubdtype(x.dtype[name], np.string_)
                or np.issubdtype(x.dtype[name], np.unicode_)
                or np.issubdtype(x.dtype[name], np.object_)
            ):
                a = da.from_array(x[name], chunks=(chunksize * len(x.names),))
                categories[name] = da.unique(a).compute()
    columns = tuple(x.dtype.names)
    divisions = tuple(range(0, len(x), chunksize))
    divisions = divisions + (len(x) - 1,)
    # On-disk tables are tokenized by path + mtime; in-memory ones by identity.
    if x.rootdir:
        token = tokenize(
            (x.rootdir, os.path.getmtime(x.rootdir)),
            chunksize,
            categorize,
            index,
            kwargs,
        )
    else:
        token = tokenize(
            (id(x), x.shape, x.dtype), chunksize, categorize, index, kwargs
        )
    new_name = "from_bcolz-" + token
    dsk = {
        (new_name, i): (
            dataframe_from_ctable,
            x,
            (slice(i * chunksize, (i + 1) * chunksize),),
            columns,
            categories,
            lock,
        )
        for i in range(0, int(ceil(len(x) / chunksize)))
    }
    # Empty slice gives the meta (column names/dtypes) for the collection.
    meta = dataframe_from_ctable(x, slice(0, 0), columns, categories, lock)
    result = DataFrame(dsk, new_name, meta, divisions)
    if index:
        # Setting the index requires approximate quantiles of that column.
        assert index in x.names
        a = da.from_array(x[index], chunks=(chunksize * len(x.names),))
        q = np.linspace(0, 100, len(x) // chunksize + 2)
        divisions = tuple(da.percentile(a, q).compute())
        return set_partition(result, index, divisions, **kwargs)
    else:
        return result
def dataframe_from_ctable(x, slc, columns=None, categories=None, lock=lock):
    """Get DataFrame from bcolz.ctable

    Parameters
    ----------
    x: bcolz.ctable
    slc: slice
    columns: list of column names or None

    >>> import bcolz
    >>> x = bcolz.ctable([[1, 2, 3, 4], [10, 20, 30, 40]], names=['a', 'b'])
    >>> dataframe_from_ctable(x, slice(1, 3))
       a   b
    1  2  20
    2  3  30
    >>> dataframe_from_ctable(x, slice(1, 3), columns=['b'])
        b
    1  20
    2  30
    >>> dataframe_from_ctable(x, slice(1, 3), columns='b')
    1    20
    2    30
    Name: b, dtype: int...
    """
    import bcolz
    if columns is None:
        columns = x.dtype.names
    if isinstance(columns, tuple):
        columns = list(columns)
    # Selecting a list gives a sub-ctable; a scalar gives a single carray.
    x = x[columns]
    if type(slc) is slice:
        start = slc.start
        stop = slc.stop if slc.stop < len(x) else len(x)
    else:
        start = slc[0].start
        stop = slc[0].stop if slc[0].stop < len(x) else len(x)
    idx = pd.Index(range(start, stop))
    # bcolz reads are not thread-safe; guard them when a lock is provided.
    if lock:
        lock.acquire()
    try:
        if isinstance(x, bcolz.ctable):
            chunks = [x[name][slc] for name in columns]
            if categories is not None:
                # Map raw values to their precomputed category codes.
                chunks = [
                    pd.Categorical.from_codes(
                        np.searchsorted(categories[name], chunk), categories[name], True
                    )
                    if name in categories
                    else chunk
                    for name, chunk in zip(columns, chunks)
                ]
            result = pd.DataFrame(
                dict(zip(columns, chunks)), columns=columns, index=idx
            )
        elif isinstance(x, bcolz.carray):
            # Single column requested: return a Series.
            chunk = x[slc]
            if categories is not None and columns and columns in categories:
                chunk = pd.Categorical.from_codes(
                    np.searchsorted(categories[columns], chunk),
                    categories[columns],
                    True,
                )
            result = pd.Series(chunk, name=columns, index=idx)
    finally:
        if lock:
            lock.release()
    return result
def _partition_from_array(data, index=None, initializer=None, **kwargs):
"""Create a Dask partition for either a DataFrame or Series.
Designed to be used with :func:`dask.blockwise.blockwise`. ``data`` is the array
from which the partition will be created. ``index`` can be:
1. ``None``, in which case each partition has an independent RangeIndex
2. a `tuple` with two elements, the start and stop values for a RangeIndex for
this partition, which gives a continuously varying RangeIndex over the
whole Dask DataFrame
3. an instance of a ``pandas.Index`` or a subclass thereof
The ``kwargs`` _must_ contain an ``initializer`` key which is set by calling
``type(meta)``.
"""
if isinstance(index, tuple):
index = pd.RangeIndex(*index)
return initializer(data, index=index, **kwargs)
def from_dask_array(x, columns=None, index=None, meta=None):
    """Create a Dask DataFrame from a Dask Array.

    Converts a 2d array into a DataFrame and a 1d array into a Series.

    Parameters
    ----------
    x : da.Array
    columns : list or string
        list of column names if DataFrame, single string if Series
    index : dask.dataframe.Index, optional
        An optional *dask* Index to use for the output Series or DataFrame.
        The default output index depends on whether `x` has any unknown
        chunks. If there are any unknown chunks, the output has ``None``
        for all the divisions (one per chunk). If all the chunks are known,
        a default index with known divisions is created.
        Specifying `index` can be useful if you're conforming a Dask Array
        to an existing dask Series or DataFrame, and you would like the
        indices to match.
    meta : object, optional
        An optional `meta` parameter can be passed for dask
        to specify the concrete dataframe type to be returned.
        By default, pandas DataFrame is used.

    Examples
    --------
    >>> import dask.array as da
    >>> import dask.dataframe as dd
    >>> x = da.ones((4, 2), chunks=(2, 2))
    >>> df = dd.io.from_dask_array(x, columns=['a', 'b'])
    >>> df.compute()
         a    b
    0  1.0  1.0
    1  1.0  1.0
    2  1.0  1.0
    3  1.0  1.0

    See Also
    --------
    dask.bag.to_dataframe: from dask.bag
    dask.dataframe._Frame.values: Reverse conversion
    dask.dataframe._Frame.to_records: Reverse conversion
    """
    meta = _meta_from_array(x, columns, index, meta=meta)
    name = "from-dask-array-" + tokenize(x, columns)
    graph_dependencies = [x]
    arrays_and_indices = [x.name, "ij" if x.ndim == 2 else "i"]
    numblocks = {x.name: x.numblocks}
    if index is not None:
        # An index is explicitly given by the caller, so we can pass it through to the
        # initializer after a few checks.
        if index.npartitions != x.numblocks[0]:
            msg = (
                "The index and array have different numbers of blocks. "
                "({} != {})".format(index.npartitions, x.numblocks[0])
            )
            raise ValueError(msg)
        divisions = index.divisions
        graph_dependencies.append(index)
        arrays_and_indices.extend([index._name, "i"])
        numblocks[index._name] = (index.npartitions,)
    elif np.isnan(sum(x.shape)):
        # The shape of the incoming array is not known in at least one dimension. As
        # such, we can't create an index for the entire output DataFrame and we set
        # the divisions to None to represent that.
        divisions = [None] * (len(x.chunks[0]) + 1)
    else:
        # The shape of the incoming array is known and we don't have an explicit index.
        # Create a mapping of chunk number in the incoming array to
        # (start row, stop row) tuples. These tuples will be used to create a sequential
        # RangeIndex later on that is continuous over the whole DataFrame.
        divisions = [0]
        stop = 0
        index_mapping = {}
        for i, increment in enumerate(x.chunks[0]):
            stop += increment
            index_mapping[(i,)] = (divisions[i], stop)
            divisions.append(stop)
        # Divisions record the last valid index value, hence the -1.
        divisions[-1] -= 1
        arrays_and_indices.extend([BlockwiseDepDict(mapping=index_mapping), "i"])
    # Series partitions need dtype/name; DataFrame partitions need columns.
    if is_series_like(meta):
        kwargs = {"dtype": x.dtype, "name": meta.name, "initializer": type(meta)}
    else:
        kwargs = {"columns": meta.columns, "initializer": type(meta)}
    blk = blockwise(
        _partition_from_array,
        name,
        "i",
        *arrays_and_indices,
        numblocks=numblocks,
        concatenate=True,
        # kwargs passed through to the DataFrame/Series initializer
        **kwargs,
    )
    graph = HighLevelGraph.from_collections(name, blk, dependencies=graph_dependencies)
    return new_dd_object(graph, name, meta, divisions)
def _link(token, result):
"""A dummy function to link results together in a graph
We use this to enforce an artificial sequential ordering on tasks that
don't explicitly pass around a shared resource
"""
return None
def _df_to_bag(df, index=False, format="tuple"):
if isinstance(df, pd.DataFrame):
if format == "tuple":
return list(map(tuple, df.itertuples(index)))
elif format == "dict":
if index:
return [
{**{"index": idx}, **values}
for values, idx in zip(df.to_dict("records"), df.index)
]
else:
return df.to_dict(orient="records")
elif isinstance(df, pd.Series):
if format == "tuple":
return list(df.items()) if index else list(df)
elif format == "dict":
return df.to_frame().to_dict(orient="records")
def to_bag(df, index=False, format="tuple"):
    """Create Dask Bag from a Dask DataFrame

    Parameters
    ----------
    index : bool, optional
        If True, the elements are tuples of ``(index, value)``, otherwise
        they're just the ``value``. Default is False.
    format : {"tuple", "dict", "frame"}, optional
        Whether to return a bag of tuples, dictionaries, or
        dataframe-like objects. Default is "tuple". If "frame",
        the original partitions of ``df`` will not be transformed
        in any way.

    Examples
    --------
    >>> bag = df.to_bag()  # doctest: +SKIP
    """
    from dask.bag.core import Bag
    if not isinstance(df, (DataFrame, Series)):
        raise TypeError("df must be either DataFrame or Series")
    name = "to_bag-" + tokenize(df, index, format)
    if format == "frame":
        # Use existing graph and name of df, but
        # drop meta to produce a Bag collection
        dsk = df.dask
        name = df._name
    else:
        # One conversion task per partition, layered on the dataframe's
        # optimized graph.
        dsk = {
            (name, i): (_df_to_bag, block, index, format)
            for (i, block) in enumerate(df.__dask_keys__())
        }
        dsk.update(df.__dask_optimize__(df.__dask_graph__(), df.__dask_keys__()))
    return Bag(dsk, name, df.npartitions)
def to_records(df):
    """Create Dask Array from a Dask Dataframe

    Warning: This creates a dask.array without precise shape information.
    Operations that depend on shape information, like slicing or reshaping,
    will not work.

    Examples
    --------
    >>> df.to_records()  # doctest: +SKIP

    See Also
    --------
    dask.dataframe._Frame.values
    dask.dataframe.from_dask_array
    """
    # Apply pandas' DataFrame.to_records to every partition.
    return df.map_partitions(M.to_records)
# TODO: type this -- causes lots of papercuts
@insert_meta_param_description
def from_delayed(
    dfs,
    meta=None,
    divisions=None,
    prefix="from-delayed",
    verify_meta=True,
):
    """Create Dask DataFrame from many Dask Delayed objects

    Parameters
    ----------
    dfs : list of Delayed or Future
        An iterable of ``dask.delayed.Delayed`` objects, such as come from
        ``dask.delayed`` or an iterable of ``distributed.Future`` objects,
        such as come from ``client.submit`` interface. These comprise the individual
        partitions of the resulting dataframe.
    $META
    divisions : tuple, str, optional
        Partition boundaries along the index.
        For tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions
        For string 'sorted' will compute the delayed values to find index
        values. Assumes that the indexes are mutually sorted.
        If None, then won't use index information
    prefix : str, optional
        Prefix to prepend to the keys.
    verify_meta : bool, optional
        If True check that the partitions have consistent metadata, defaults to True.
    """
    from dask.delayed import Delayed
    if isinstance(dfs, Delayed):
        dfs = [dfs]
    # Wrap Future-like objects (anything with a .key) as Delayed.
    dfs = [
        delayed(df) if not isinstance(df, Delayed) and hasattr(df, "key") else df
        for df in dfs
    ]
    for df in dfs:
        if not isinstance(df, Delayed):
            raise TypeError("Expected Delayed object, got %s" % type(df).__name__)
    # Without explicit meta, compute it from the first partition.
    if meta is None:
        meta = delayed(make_meta)(dfs[0]).compute()
    else:
        meta = make_meta(meta)
    if not dfs:
        dfs = [delayed(make_meta)(meta)]
    if divisions is None or divisions == "sorted":
        divs = [None] * (len(dfs) + 1)
    else:
        divs = tuple(divisions)
        if len(divs) != len(dfs) + 1:
            raise ValueError("divisions should be a tuple of len(dfs) + 1")
    name = prefix + "-" + tokenize(*dfs)
    layer = DataFrameIOLayer(
        name=name,
        columns=None,
        inputs=BlockwiseDepDict(
            {(i,): inp.key for i, inp in enumerate(dfs)},
            produces_keys=True,
        ),
        # Optionally validate each partition against meta as it is produced.
        io_func=partial(check_meta, meta=meta, funcname="from_delayed")
        if verify_meta
        else lambda x: x,
    )
    df = new_dd_object(
        HighLevelGraph.from_collections(name, layer, dfs), name, meta, divs
    )
    if divisions == "sorted":
        # Compute real divisions from the (assumed sorted) partition indexes.
        from dask.dataframe.shuffle import compute_and_set_divisions
        df = compute_and_set_divisions(df)
    return df
def sorted_division_locations(seq, npartitions=None, chunksize=None):
    """Find division locations and values in sorted list

    Examples
    --------
    >>> L = ['A', 'B', 'C', 'D', 'E', 'F']
    >>> sorted_division_locations(L, chunksize=2)
    (['A', 'C', 'E', 'F'], [0, 2, 4, 6])

    >>> sorted_division_locations(L, chunksize=3)
    (['A', 'D', 'F'], [0, 3, 6])

    >>> L = ['A', 'A', 'A', 'A', 'B', 'B', 'B', 'C']
    >>> sorted_division_locations(L, chunksize=3)
    (['A', 'B', 'C'], [0, 4, 8])

    >>> sorted_division_locations(L, chunksize=2)
    (['A', 'B', 'C'], [0, 4, 8])

    >>> sorted_division_locations(['A'], chunksize=2)
    (['A', 'A'], [0, 1])
    """
    if (npartitions is None) == (chunksize is None):
        raise ValueError("Exactly one of npartitions and chunksize must be specified.")
    if npartitions:
        chunksize = ceil(len(seq) / npartitions)
    n = len(seq)
    locations = [0]
    divisions = [seq[0]]
    for candidate in range(0, n, chunksize):
        # Skip candidates already passed while extending a duplicate run.
        if candidate <= locations[-1]:
            continue
        cut = candidate
        # Never split a run of equal values across two partitions: push the
        # cut forward until the value changes (or the sequence ends).
        while cut + 1 < n and seq[cut - 1] == seq[cut]:
            cut += 1
        divisions.append(seq[cut])
        if cut == n - 1:
            cut += 1
        locations.append(cut)
    # Close the final partition if the last cut did not reach the end.
    if locations[-1] != n:
        locations.append(n)
        divisions.append(seq[-1])
    return divisions, locations
class _PackedArgCallable(DataFrameIOFunction):
    """Packed-argument wrapper for DataFrameIOFunction.

    Private helper for ``from_map``: expands a packed tuple of positional
    arguments before calling the wrapped ``func``, and optionally enforces
    the expected metadata on the result.  When ``func`` itself satisfies
    the ``DataFrameIOFunction`` protocol, column projection is forwarded
    to it.
    """

    def __init__(
        self,
        func,
        args=None,
        kwargs=None,
        meta=None,
        packed=False,
        enforce_metadata=False,
    ):
        self.func = func
        self.args = args
        self.kwargs = kwargs
        self.meta = meta
        self.enforce_metadata = enforce_metadata
        self.packed = packed
        self.is_dataframe_io_func = isinstance(self.func, DataFrameIOFunction)

    @property
    def columns(self):
        # Column selection is only meaningful when the wrapped function
        # supports the DataFrameIOFunction protocol.
        return self.func.columns if self.is_dataframe_io_func else None

    def project_columns(self, columns):
        if not self.is_dataframe_io_func:
            # The wrapped function cannot project; keep the wrapper as-is.
            return self
        return _PackedArgCallable(
            self.func.project_columns(columns),
            args=self.args,
            kwargs=self.kwargs,
            meta=self.meta,
            packed=self.packed,
            enforce_metadata=self.enforce_metadata,
        )

    def __call__(self, packed_arg):
        # When not packed, the single input is treated as one positional arg.
        positional = packed_arg if self.packed else (packed_arg,)
        extra = self.args or ()
        kw = self.kwargs or {}
        if self.enforce_metadata:
            return apply_and_enforce(
                *positional,
                *extra,
                _func=self.func,
                _meta=self.meta,
                **kw,
            )
        return self.func(*positional, *extra, **kw)
@insert_meta_param_description
def from_map(
    func,
    *iterables,
    args=None,
    meta=None,
    divisions=None,
    label=None,
    token=None,
    enforce_metadata=True,
    **kwargs,
):
    """Create a DataFrame collection from a custom function map

    WARNING: The ``from_map`` API is experimental, and stability is not
    yet guaranteed. Use at your own risk!

    Parameters
    ----------
    func : callable
        Function used to create each partition. If ``func`` satisfies the
        ``DataFrameIOFunction`` protocol, column projection will be enabled.
    *iterables : Iterable objects
        Iterable objects to map to each output partition. All iterables must
        be the same length. This length determines the number of partitions
        in the output collection (only one element of each iterable will
        be passed to ``func`` for each partition).
    args : list or tuple, optional
        Positional arguments to broadcast to each output partition. Note
        that these arguments will always be passed to ``func`` after the
        ``iterables`` positional arguments.
    $META
    divisions : tuple, str, optional
        Partition boundaries along the index.
        For tuple, see https://docs.dask.org/en/latest/dataframe-design.html#partitions
        For string 'sorted' will compute the delayed values to find index
        values.  Assumes that the indexes are mutually sorted.
        If None, then won't use index information
    label : str, optional
        String to use as the function-name label in the output
        collection-key names.
    token : str, optional
        String to use as the "token" in the output collection-key names.
    enforce_metadata : bool, default True
        Whether to enforce at runtime that the structure of the DataFrame
        produced by ``func`` actually matches the structure of ``meta``.
        This will rename and reorder columns for each partition,
        and will raise an error if this doesn't work or types don't match.
    **kwargs:
        Key-word arguments to broadcast to each output partition. These
        same arguments will be passed to ``func`` for every output partition.

    Examples
    --------
    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> func = lambda x, size=0: pd.Series([x] * size)
    >>> inputs = ["A", "B"]
    >>> dd.from_map(func, inputs, size=2).compute()
    0    A
    1    A
    0    B
    1    B
    dtype: object

    This API can also be used as an alternative to other file-based
    IO functions, like ``read_parquet`` (which are already just
    ``from_map`` wrapper functions):

    >>> import pandas as pd
    >>> import dask.dataframe as dd
    >>> paths = ["0.parquet", "1.parquet", "2.parquet"]
    >>> dd.from_map(pd.read_parquet, paths).head()  # doctest: +SKIP
                        name
    timestamp
    2000-01-01 00:00:00   Laura
    2000-01-01 00:00:01  Oliver
    2000-01-01 00:00:02   Alice
    2000-01-01 00:00:03  Victor
    2000-01-01 00:00:04     Bob

    Since ``from_map`` allows you to map an arbitrary function
    to any number of iterable objects, it can be a very convenient
    means of implementing functionality that may be missing from
    other DataFrame-creation methods. For example, if you
    happen to have apriori knowledge about the number of rows
    in each of the files in a dataset, you can generate a
    DataFrame collection with a global RangeIndex:

    >>> import pandas as pd
    >>> import numpy as np
    >>> import dask.dataframe as dd
    >>> paths = ["0.parquet", "1.parquet", "2.parquet"]
    >>> file_sizes = [86400, 86400, 86400]
    >>> def func(path, row_offset):
    ...     # Read parquet file and set RangeIndex offset
    ...     df = pd.read_parquet(path)
    ...     return df.set_index(
    ...         pd.RangeIndex(row_offset, row_offset+len(df))
    ...     )
    >>> def get_ddf(paths, file_sizes):
    ...     offsets = [0] + list(np.cumsum(file_sizes))
    ...     return dd.from_map(
    ...         func, paths, offsets[:-1], divisions=offsets
    ...     )
    >>> ddf = get_ddf(paths, file_sizes)  # doctest: +SKIP
    >>> ddf.index  # doctest: +SKIP
    Dask Index Structure:
    npartitions=3
    0         int64
    86400       ...
    172800      ...
    259200      ...
    dtype: int64
    Dask Name: myfunc, 6 tasks

    See Also
    --------
    dask.dataframe.from_delayed
    dask.layers.DataFrameIOLayer
    """

    # Input validation
    if not callable(func):
        raise ValueError("`func` argument must be `callable`")
    lengths = set()
    iterables = list(iterables)
    for i, iterable in enumerate(iterables):
        if not isinstance(iterable, Iterable):
            raise ValueError(
                f"All elements of `iterables` must be Iterable, got {type(iterable)}"
            )
        try:
            lengths.add(len(iterable))
        except (AttributeError, TypeError):
            # Iterables without __len__ are materialized so their
            # length can be checked (and later re-iterated).
            iterables[i] = list(iterable)
            lengths.add(len(iterables[i]))
    if len(lengths) == 0:
        raise ValueError("`from_map` requires at least one Iterable input")
    elif len(lengths) > 1:
        raise ValueError("All `iterables` must have the same length")
    if lengths == {0}:
        raise ValueError("All `iterables` must have a non-zero length")

    # Check for `produces_tasks` and `creation_info`.
    # These options are included in the function signature,
    # because they are not intended for "public" use.
    produces_tasks = kwargs.pop("produces_tasks", False)
    creation_info = kwargs.pop("creation_info", None)

    if produces_tasks or len(iterables) == 1:
        if len(iterables) > 1:
            # Tasks are not detected correctly when they are "packed"
            # within an outer list/tuple
            raise ValueError(
                "Multiple iterables not supported when produces_tasks=True"
            )
        inputs = iterables[0]
        packed = False
    else:
        # Zip the iterables into one tuple of arguments per partition;
        # _PackedArgCallable will unpack them at call time.
        inputs = list(zip(*iterables))
        packed = True

    # Define collection name
    label = label or funcname(func)
    token = token or tokenize(
        func, meta, inputs, args, divisions, enforce_metadata, **kwargs
    )
    name = f"{label}-{token}"

    # Get "projectable" column selection.
    # Note that this relies on the IO function
    # ducktyping with DataFrameIOFunction
    column_projection = func.columns if isinstance(func, DataFrameIOFunction) else None

    # NOTE: Most of the metadata-handling logic used here
    # is copied directly from `map_partitions`
    if meta is None:
        meta = _emulate(
            func,
            *(inputs[0] if packed else inputs[:1]),
            *(args or []),
            udf=True,
            **kwargs,
        )
        meta_is_emulated = True
    else:
        meta = make_meta(meta)
        meta_is_emulated = False

    if not (has_parallel_type(meta) or is_arraylike(meta) and meta.shape):
        if not meta_is_emulated:
            raise TypeError(
                "Meta is not valid, `from_map` expects output to be a pandas object. "
                "Try passing a pandas object as meta or a dict or tuple representing the "
                "(name, dtype) of the columns."
            )
        # If `meta` is not a pandas object, the concatenated results will be a
        # different type
        meta = make_meta(_concat([meta]))

    # Ensure meta is empty DataFrame
    meta = make_meta(meta)

    # Define io_func
    if packed or args or kwargs or enforce_metadata:
        io_func = _PackedArgCallable(
            func,
            args=args,
            kwargs=kwargs,
            meta=meta if enforce_metadata else None,
            enforce_metadata=enforce_metadata,
            packed=packed,
        )
    else:
        # Nothing to wrap: the raw function can consume inputs directly.
        io_func = func

    # Construct DataFrameIOLayer
    layer = DataFrameIOLayer(
        name,
        column_projection,
        inputs,
        io_func,
        label=label,
        produces_tasks=produces_tasks,
        creation_info=creation_info,
    )

    # Return new DataFrame-collection object
    divisions = divisions or [None] * (len(inputs) + 1)
    graph = HighLevelGraph.from_collections(name, layer, dependencies=[])
    return new_dd_object(graph, name, meta, divisions)
# Copy the docstrings from the module-level implementations onto the
# corresponding DataFrame methods so interactive help shows them.
DataFrame.to_records.__doc__ = to_records.__doc__
DataFrame.to_bag.__doc__ = to_bag.__doc__
| [
"dask.utils._deprecated",
"dask.bag.core.Bag",
"dask.array.unique",
"dask.utils.is_arraylike",
"numpy.array",
"dask.base.tokenize",
"dask.blockwise.blockwise",
"pandas.RangeIndex",
"bcolz.ctable",
"numpy.isscalar",
"dask.dataframe.core.DataFrame",
"numpy.searchsorted",
"threading.Lock",
"n... | [((926, 932), 'threading.Lock', 'Lock', ([], {}), '()\n', (930, 932), False, 'from threading import Lock\n'), ((9674, 9712), 'dask.utils._deprecated', '_deprecated', ([], {'after_version': '"""2022.02.1"""'}), "(after_version='2022.02.1')\n", (9685, 9712), False, 'from dask.utils import M, _deprecated, funcname, is_arraylike\n'), ((4348, 4379), 'dask.base.tokenize', 'tokenize', (['x', 'chunksize', 'columns'], {}), '(x, chunksize, columns)\n', (4356, 4379), False, 'from dask.base import tokenize\n'), ((4751, 4792), 'dask.dataframe.core.new_dd_object', 'new_dd_object', (['dsk', 'name', 'meta', 'divisions'], {}), '(dsk, name, meta, divisions)\n', (4764, 4792), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((9629, 9670), 'dask.dataframe.core.new_dd_object', 'new_dd_object', (['dsk', 'name', 'data', 'divisions'], {}), '(dsk, name, data, divisions)\n', (9642, 9670), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((12043, 12084), 'dask.dataframe.core.DataFrame', 'DataFrame', (['dsk', 'new_name', 'meta', 'divisions'], {}), '(dsk, new_name, meta, divisions)\n', (12052, 12084), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((18842, 18862), 'dask.dataframe.utils.is_series_like', 'is_series_like', (['meta'], {}), '(meta)\n', (18856, 18862), False, 'from dask.dataframe.utils import check_meta, insert_meta_param_description, is_series_like, make_meta\n'), ((19037, 19155), 'dask.blockwise.blockwise', 'blockwise', (['_partition_from_array', 'name', '"""i"""', '*arrays_and_indices'], {'numblocks': 'numblocks', 'concatenate': '(True)'}), "(_partition_from_array, name, 'i', *arrays_and_indices, numblocks=\n numblocks, concatenate=True, **kwargs)\n", (19046, 19155), False, 'from dask.blockwise 
import BlockwiseDepDict, blockwise\n'), ((19295, 19370), 'dask.highlevelgraph.HighLevelGraph.from_collections', 'HighLevelGraph.from_collections', (['name', 'blk'], {'dependencies': 'graph_dependencies'}), '(name, blk, dependencies=graph_dependencies)\n', (19326, 19370), False, 'from dask.highlevelgraph import HighLevelGraph\n'), ((19382, 19425), 'dask.dataframe.core.new_dd_object', 'new_dd_object', (['graph', 'name', 'meta', 'divisions'], {}), '(graph, name, meta, divisions)\n', (19395, 19425), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((21581, 21611), 'dask.bag.core.Bag', 'Bag', (['dsk', 'name', 'df.npartitions'], {}), '(dsk, name, df.npartitions)\n', (21584, 21611), False, 'from dask.bag.core import Bag\n'), ((35362, 35377), 'dask.dataframe.utils.make_meta', 'make_meta', (['meta'], {}), '(meta)\n', (35371, 35377), False, 'from dask.dataframe.utils import check_meta, insert_meta_param_description, is_series_like, make_meta\n'), ((35775, 35910), 'dask.layers.DataFrameIOLayer', 'DataFrameIOLayer', (['name', 'column_projection', 'inputs', 'io_func'], {'label': 'label', 'produces_tasks': 'produces_tasks', 'creation_info': 'creation_info'}), '(name, column_projection, inputs, io_func, label=label,\n produces_tasks=produces_tasks, creation_info=creation_info)\n', (35791, 35910), False, 'from dask.layers import DataFrameIOLayer\n'), ((36084, 36145), 'dask.highlevelgraph.HighLevelGraph.from_collections', 'HighLevelGraph.from_collections', (['name', 'layer'], {'dependencies': '[]'}), '(name, layer, dependencies=[])\n', (36115, 36145), False, 'from dask.highlevelgraph import HighLevelGraph\n'), ((36157, 36200), 'dask.dataframe.core.new_dd_object', 'new_dd_object', (['graph', 'name', 'meta', 'divisions'], {}), '(graph, name, meta, divisions)\n', (36170, 36200), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, 
has_parallel_type, new_dd_object\n'), ((1449, 1463), 'pandas.DataFrame', 'pd.DataFrame', ([], {}), '()\n', (1461, 1463), True, 'import pandas as pd\n'), ((3005, 3027), 'numpy.array', 'np.array', (['[]'], {'dtype': 'dt'}), '([], dtype=dt)\n', (3013, 3027), True, 'import numpy as np\n'), ((4563, 4583), 'dask.dataframe.utils.is_series_like', 'is_series_like', (['meta'], {}), '(meta)\n', (4577, 4583), False, 'from dask.dataframe.utils import check_meta, insert_meta_param_description, is_series_like, make_meta\n'), ((7968, 7991), 'dask.dataframe.core.has_parallel_type', 'has_parallel_type', (['data'], {}), '(data)\n', (7985, 7991), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((8787, 8845), 'dask.dataframe.core.new_dd_object', 'new_dd_object', (['{(name, 0): data}', 'name', 'data', '[None, None]'], {}), '({(name, 0): data}, name, data, [None, None])\n', (8800, 8845), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((10509, 10515), 'threading.Lock', 'Lock', ([], {}), '()\n', (10513, 10515), False, 'from threading import Lock\n'), ((10603, 10626), 'bcolz.ctable', 'bcolz.ctable', ([], {'rootdir': 'x'}), '(rootdir=x)\n', (10615, 10626), False, 'import bcolz\n'), ((12333, 12382), 'dask.dataframe.shuffle.set_partition', 'set_partition', (['result', 'index', 'divisions'], {}), '(result, index, divisions, **kwargs)\n', (12346, 12382), False, 'from dask.dataframe.shuffle import set_partition\n'), ((15350, 15371), 'pandas.RangeIndex', 'pd.RangeIndex', (['*index'], {}), '(*index)\n', (15363, 15371), True, 'import pandas as pd\n'), ((17096, 17116), 'dask.base.tokenize', 'tokenize', (['x', 'columns'], {}), '(x, columns)\n', (17104, 17116), False, 'from dask.base import tokenize\n'), ((21137, 21164), 'dask.base.tokenize', 'tokenize', (['df', 'index', 'format'], {}), '(df, index, format)\n', 
(21145, 21164), False, 'from dask.base import tokenize\n'), ((23684, 23699), 'dask.dataframe.utils.make_meta', 'make_meta', (['meta'], {}), '(meta)\n', (23693, 23699), False, 'from dask.dataframe.utils import check_meta, insert_meta_param_description, is_series_like, make_meta\n'), ((24032, 24046), 'dask.base.tokenize', 'tokenize', (['*dfs'], {}), '(*dfs)\n', (24040, 24046), False, 'from dask.base import tokenize\n'), ((24411, 24460), 'dask.highlevelgraph.HighLevelGraph.from_collections', 'HighLevelGraph.from_collections', (['name', 'layer', 'dfs'], {}), '(name, layer, dfs)\n', (24442, 24460), False, 'from dask.highlevelgraph import HighLevelGraph\n'), ((24599, 24628), 'dask.dataframe.shuffle.compute_and_set_divisions', 'compute_and_set_divisions', (['df'], {}), '(df)\n', (24624, 24628), False, 'from dask.dataframe.shuffle import compute_and_set_divisions\n'), ((34006, 34020), 'dask.utils.funcname', 'funcname', (['func'], {}), '(func)\n', (34014, 34020), False, 'from dask.utils import M, _deprecated, funcname, is_arraylike\n'), ((34042, 34115), 'dask.base.tokenize', 'tokenize', (['func', 'meta', 'inputs', 'args', 'divisions', 'enforce_metadata'], {}), '(func, meta, inputs, args, divisions, enforce_metadata, **kwargs)\n', (34050, 34115), False, 'from dask.base import tokenize\n'), ((34522, 34616), 'dask.dataframe.core._emulate', '_emulate', (['func', '*(inputs[0] if packed else inputs[:1])', '*(args or [])'], {'udf': '(True)'}), '(func, *(inputs[0] if packed else inputs[:1]), *(args or []), udf=\n True, **kwargs)\n', (34530, 34616), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((34740, 34755), 'dask.dataframe.utils.make_meta', 'make_meta', (['meta'], {}), '(meta)\n', (34749, 34755), False, 'from dask.dataframe.utils import check_meta, insert_meta_param_description, is_series_like, make_meta\n'), ((1641, 1661), 'numpy.isscalar', 'np.isscalar', (['columns'], {}), 
'(columns)\n', (1652, 1661), True, 'import numpy as np\n'), ((2535, 2555), 'numpy.isnan', 'np.isnan', (['x.shape[1]'], {}), '(x.shape[1])\n', (2543, 2555), True, 'import numpy as np\n'), ((8487, 8512), 'math.ceil', 'ceil', (['(nrows / npartitions)'], {}), '(nrows / npartitions)\n', (8491, 8512), False, 'from math import ceil\n'), ((8726, 8751), 'dask.base.tokenize', 'tokenize', (['data', 'chunksize'], {}), '(data, chunksize)\n', (8734, 8751), False, 'from dask.base import tokenize\n'), ((23340, 23351), 'dask.delayed.delayed', 'delayed', (['df'], {}), '(df)\n', (23347, 23351), False, 'from dask.delayed import delayed\n'), ((27569, 27679), 'dask.dataframe.core.apply_and_enforce', 'apply_and_enforce', (['*packed_arg', '*(self.args or [])'], {'_func': 'self.func', '_meta': 'self.meta'}), '(*packed_arg, *(self.args or []), _func=self.func, _meta=\n self.meta, **self.kwargs or {})\n', (27586, 27679), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((34802, 34825), 'dask.dataframe.core.has_parallel_type', 'has_parallel_type', (['meta'], {}), '(meta)\n', (34819, 34825), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((35296, 35311), 'dask.dataframe.core._concat', '_concat', (['[meta]'], {}), '([meta])\n', (35303, 35311), False, 'from dask.dataframe.core import DataFrame, Index, Series, _concat, _emulate, apply_and_enforce, has_parallel_type, new_dd_object\n'), ((2081, 2101), 'numpy.isscalar', 'np.isscalar', (['columns'], {}), '(columns)\n', (2092, 2101), True, 'import numpy as np\n'), ((10875, 10915), 'numpy.issubdtype', 'np.issubdtype', (['x.dtype[name]', 'np.string_'], {}), '(x.dtype[name], np.string_)\n', (10888, 10915), True, 'import numpy as np\n'), ((10935, 10976), 'numpy.issubdtype', 'np.issubdtype', (['x.dtype[name]', 'np.unicode_'], {}), '(x.dtype[name], np.unicode_)\n', (10948, 
10976), True, 'import numpy as np\n'), ((10996, 11036), 'numpy.issubdtype', 'np.issubdtype', (['x.dtype[name]', 'np.object_'], {}), '(x.dtype[name], np.object_)\n', (11009, 11036), True, 'import numpy as np\n'), ((11386, 11413), 'os.path.getmtime', 'os.path.getmtime', (['x.rootdir'], {}), '(x.rootdir)\n', (11402, 11413), False, 'import os\n'), ((14455, 14496), 'pandas.Series', 'pd.Series', (['chunk'], {'name': 'columns', 'index': 'idx'}), '(chunk, name=columns, index=idx)\n', (14464, 14496), True, 'import pandas as pd\n'), ((23732, 23750), 'dask.delayed.delayed', 'delayed', (['make_meta'], {}), '(make_meta)\n', (23739, 23750), False, 'from dask.delayed import delayed\n'), ((24268, 24323), 'functools.partial', 'partial', (['check_meta'], {'meta': 'meta', 'funcname': '"""from_delayed"""'}), "(check_meta, meta=meta, funcname='from_delayed')\n", (24275, 24323), False, 'from functools import partial\n'), ((34829, 34847), 'dask.utils.is_arraylike', 'is_arraylike', (['meta'], {}), '(meta)\n', (34841, 34847), False, 'from dask.utils import M, _deprecated, funcname, is_arraylike\n'), ((12287, 12306), 'dask.array.percentile', 'da.percentile', (['a', 'q'], {}), '(a, q)\n', (12300, 12306), True, 'import dask.array as da\n'), ((18787, 18826), 'dask.blockwise.BlockwiseDepDict', 'BlockwiseDepDict', ([], {'mapping': 'index_mapping'}), '(mapping=index_mapping)\n', (18803, 18826), False, 'from dask.blockwise import BlockwiseDepDict, blockwise\n'), ((23622, 23640), 'dask.delayed.delayed', 'delayed', (['make_meta'], {}), '(make_meta)\n', (23629, 23640), False, 'from dask.delayed import delayed\n'), ((2328, 2355), 'numpy.array', 'np.array', (['[]'], {'dtype': 'x.dtype'}), '([], dtype=x.dtype)\n', (2336, 2355), True, 'import numpy as np\n'), ((11166, 11178), 'dask.array.unique', 'da.unique', (['a'], {}), '(a)\n', (11175, 11178), True, 'import dask.array as da\n'), ((14304, 14347), 'numpy.searchsorted', 'np.searchsorted', (['categories[columns]', 'chunk'], {}), '(categories[columns], 
chunk)\n', (14319, 14347), True, 'import numpy as np\n'), ((13728, 13768), 'numpy.searchsorted', 'np.searchsorted', (['categories[name]', 'chunk'], {}), '(categories[name], chunk)\n', (13743, 13768), True, 'import numpy as np\n')] |
# Copyright 2015-2016 Stanford University
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
from collections import Counter
import numpy as np
import pandas as pd
from sklearn.utils import resample
from ..util.log import *
# Column data-type tags used throughout this module (see readCsv).
CAT = 0  # categorical data type
NUM = 1  # numeric data type
ID = 2  # identifier (to be ignored)
NUM_RES = 3  # numeric response
CAT_RES = 4  # categorical response
# Splits the dataset into two randomly selected sets according to
# the given proportion.
#
# parameters/returns:
# df : pandas.DataFrame
# prop : float (the proportion of training points, typically ~70%)
# return : (pandas.DataFrame, pandas.DataFrame) (the (training, test) datasets)
def split(df, prop):
    """Randomly partition *df* into (train, test) frames.

    *prop* is the fraction of rows assigned to the training set.
    Raises Exception when *prop* is outside [0, 1].
    """
    if prop < 0.0 or prop > 1.0:
        raise Exception('Invalid proportion: ' + str(prop))
    # Shuffle the row positions, then cut the permutation at the split point.
    indices = list(range(len(df)))
    random.shuffle(indices)
    cut = int(prop * len(df))
    trainDf = df.iloc[indices[:cut], :]
    testDf = df.iloc[indices[cut:], :]
    return (trainDf, testDf)
def constructDataMatrix(df, res, catFeats, resampler=None):
    """Split *df* into a covariate matrix X and response vector y.

    Columns whose name starts with ``'<catFeat>_'`` are recorded as
    indicator columns of that categorical feature; every other
    non-response column is treated as numeric.  When *resampler* is
    given it is instantiated and its ``fit_resample`` rebalances (X, y).

    return : (X, y, catFeatIndices, numericFeatIndices)
    """
    covCols = []
    resCols = []
    catFeatIndices = [[] for _ in catFeats]
    numericFeatIndices = []
    for i, colName in enumerate(df.columns):
        log('i:' + str(i) + ' df.columns[i]:' + str(colName), INFO)
        if colName == res:
            resCols.append(i)
            continue
        categorical = False
        for j, cF in enumerate(catFeats):
            # Indicator columns are named '<feature>_<value>' by get_dummies.
            if str(colName).startswith(str(cF) + '_'):
                categorical = True
                catFeatIndices[j].append(len(covCols))
                log('i:' + str(i) + ' df.columns[i]:' +
                    str(colName) + ' catFeat:' + str(cF), INFO)
        if not categorical:
            numericFeatIndices.append(len(covCols))
        covCols.append(i)
    if len(resCols) != 1:
        raise Exception('Invalid columns!')
    # Covariate matrix and response vector as plain numpy arrays.
    X = np.array(df.iloc[:, covCols].values)
    y = np.array(df.iloc[:, resCols].values[:, 0])
    if resampler:
        print('Resampling dataset using: {}'.format(resampler.__name__))
        ros = resampler()
        print('Original dataset shape {}'.format(Counter(y)))
        X, y = ros.fit_resample(X, y)
        print('Resampled dataset shape {}'.format(Counter(y)))
    print("catFeatIndices", catFeatIndices, numericFeatIndices)
    return (X, y, catFeatIndices, numericFeatIndices)
# Parse the given CSV file and return a pandas DataFrame
# representation of the data.
#
# Note: The dataProcessors field is a list of lambdas that
# are applied to the data in each column (in particular,
# it should be the same length as the list dataTypes).
# This field can be used to preprocess the data.
#
# parameters/returns:
# path : str (path of the CSV file)
# hasHeader : bool (whether the dataset has a header to ignore)
# dataTypes : [int] (categorical, numeric, or identifier)
# return : pandas.DataFrame
def readCsv(path, hasHeader, dataTypes, headers):
    """Parse the given CSV file and return a pandas DataFrame representation.

    parameters/returns:
      path      : str (path of the CSV file)
      hasHeader : bool (whether the dataset has a header row to skip)
      dataTypes : [int] (CAT/NUM/ID/NUM_RES/CAT_RES tag per raw column)
      headers   : [str] or falsy (explicit column names; running integer
                  indices are used when falsy)
      return    : (pandas.DataFrame,
                   response column name,
                   map from categorical response value to integer code,
                   list of dummy-encoded column names)

    Raises Exception when zero, or more than one, response column is declared.
    """
    # Step 1: Work out, per raw column, how it should be parsed.
    log('Reading file: ' + path, INFO)
    # Step 1a: Skip the first row if it is the header
    skiprows = 1 if hasHeader else 0
    # Step 1b: Initialize data structures
    cur = 0
    names = []    # column names handed to pandas
    dtype = {}    # per-column parse types
    impute = []   # numeric columns whose NaNs get mean-imputed
    dummies = []  # categorical columns converted to indicators
    usecols = []  # positional indices of the raw columns we keep
    res = None
    isCatRes = None
    for i in range(len(dataTypes)):
        if _isSkip(dataTypes[i]):
            continue
        # Use the caller-provided header when available, otherwise the
        # running index of kept columns.
        name = headers[i] if headers else cur
        names.append(name)
        dtype[name] = _toDType(dataTypes[i])
        usecols.append(i)
        if _isImpute(dataTypes[i]):
            impute.append(name)
        if _isDummy(dataTypes[i]):
            dummies.append(name)
        if _isResponse(dataTypes[i]):
            if res is not None:
                raise Exception('Multiple response variables!')
            res = name
            isCatRes = _isCatResponse(dataTypes[i])
        cur += 1
    if res is None:
        raise Exception('No response variable!')
    # Step 1g: Parse the CSV ('?' is the missing-value marker)
    df = pd.read_csv(path, header=None, skiprows=skiprows, usecols=usecols,
                     names=names, dtype=dtype, na_values=['?'])
    log('Dataset shape: ' + str(df.shape), INFO)
    # Step 2: Impute missing numeric values with the column mean
    for i in impute:
        df[i].fillna(df[i].mean(), inplace=True)
    # Step 3: Convert categorical columns to indicator variables
    df = pd.get_dummies(df, columns=dummies, dummy_na=True)
    # Step 4: If categorical response, convert its values to integer codes
    resMap = {}
    if isCatRes:
        # Step 4a: Construct response map (first appearance -> next code)
        for val in df[res]:
            if val not in resMap:
                resMap[val] = len(resMap)
        # Step 4b: Map response
        df[res] = df[res].apply(lambda val: resMap[val])
    log('Columns after: ' + str(len(df.columns)), INFO)
    log('Column names after:\n' + ''.join((str(i) + ': ' + str(col) + '\n' for (i, col)
                                           in zip(list(range(len(df.columns))), df.columns))), INFO)
    return (df, res, resMap, dummies)
# Checks whether the datatype should be skipped (only ID).
def _isSkip(dataType):
    """Return True when the column should be excluded from parsing (ID columns)."""
    return dataType == ID
# Checks whether the data type should be imputed.
def _isImpute(dataType):
    """Return True when missing values in the column should be imputed (numeric covariates)."""
    return dataType == NUM
# Checks whether the data type should be converted from categorical to indicators.
def _isDummy(dataType):
    """Return True when the column should be expanded into indicator (dummy) variables."""
    return dataType == CAT
# Checks whether the data type is a response type.
def _isResponse(dataType):
    """Return True for either response tag (numeric or categorical)."""
    return dataType in (NUM_RES, CAT_RES)
# Checks whether the data type is a categorical response.
def _isCatResponse(dataType):
    """Return True when the response variable is categorical."""
    return dataType == CAT_RES
# Converts a data type to a pandas type.
def _toDType(dataType):
    """Map a module data-type tag to the dtype used when parsing the CSV.

    Raises Exception for ID columns (which are never parsed) and for
    unknown tags.
    """
    if dataType in (CAT, CAT_RES):
        return str
    if dataType in (NUM, NUM_RES):
        return np.float64
    if dataType == ID:
        raise Exception('Should not consider ID types')
    raise Exception('Unknown data type: ' + str(dataType))
| [
"random.shuffle",
"pandas.read_csv",
"collections.Counter",
"numpy.array",
"pandas.get_dummies"
] | [((1390, 1410), 'random.shuffle', 'random.shuffle', (['rows'], {}), '(rows)\n', (1404, 1410), False, 'import random\n'), ((2865, 2887), 'numpy.array', 'np.array', (['covDf.values'], {}), '(covDf.values)\n', (2873, 2887), True, 'import numpy as np\n'), ((2896, 2924), 'numpy.array', 'np.array', (['resDf.values[:, 0]'], {}), '(resDf.values[:, 0])\n', (2904, 2924), True, 'import numpy as np\n'), ((5557, 5671), 'pandas.read_csv', 'pd.read_csv', (['path'], {'header': 'None', 'skiprows': 'skiprows', 'usecols': 'usecols', 'names': 'names', 'dtype': 'dtype', 'na_values': "['?']"}), "(path, header=None, skiprows=skiprows, usecols=usecols, names=\n names, dtype=dtype, na_values=['?'])\n", (5568, 5671), True, 'import pandas as pd\n'), ((5990, 6040), 'pandas.get_dummies', 'pd.get_dummies', (['df'], {'columns': 'dummies', 'dummy_na': '(True)'}), '(df, columns=dummies, dummy_na=True)\n', (6004, 6040), True, 'import pandas as pd\n'), ((3092, 3102), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (3099, 3102), False, 'from collections import Counter\n'), ((3193, 3203), 'collections.Counter', 'Counter', (['y'], {}), '(y)\n', (3200, 3203), False, 'from collections import Counter\n')] |
# | <NAME>, <NAME>, <NAME> |
# | POLITECHNIKA WROCŁAWSKA |
# | WYDZIAŁ INFORMATYKI I TELEKOMUNIKACJI |
# | 2021/2022 |
import os
import numpy as np
from PyQt5.QtWidgets import QFileDialog
import helpers.load_from_mendeley as mendeley
import helpers.load_from_mitdb as mitdb
# loading data from file
def load_data(context):
# open file explorer
path = QFileDialog.getOpenFileName(context, 'Open a file', '',
'.dat .hea .mat (*.dat *.hea *.mat)')[0]
if path == '':
return np.empty(shape=[0]), np.empty(shape=[0]), np.empty(shape=[0])
extension = os.path.splitext(path)[1]
file_name = os.path.basename(path)
# load data with properly loader
if(extension == '.dat' or extension == '.hea'):
ecg, fs = mitdb.load_from_file(path=path)
elif(extension == '.mat'):
ecg, fs = mendeley.load_from_file(path=path)
else:
print('error')
return ecg, fs, file_name
| [
"helpers.load_from_mendeley.load_from_file",
"os.path.splitext",
"helpers.load_from_mitdb.load_from_file",
"numpy.empty",
"os.path.basename",
"PyQt5.QtWidgets.QFileDialog.getOpenFileName"
] | [((736, 758), 'os.path.basename', 'os.path.basename', (['path'], {}), '(path)\n', (752, 758), False, 'import os\n'), ((446, 543), 'PyQt5.QtWidgets.QFileDialog.getOpenFileName', 'QFileDialog.getOpenFileName', (['context', '"""Open a file"""', '""""""', '""".dat .hea .mat (*.dat *.hea *.mat)"""'], {}), "(context, 'Open a file', '',\n '.dat .hea .mat (*.dat *.hea *.mat)')\n", (473, 543), False, 'from PyQt5.QtWidgets import QFileDialog\n'), ((694, 716), 'os.path.splitext', 'os.path.splitext', (['path'], {}), '(path)\n', (710, 716), False, 'import os\n'), ((867, 898), 'helpers.load_from_mitdb.load_from_file', 'mitdb.load_from_file', ([], {'path': 'path'}), '(path=path)\n', (887, 898), True, 'import helpers.load_from_mitdb as mitdb\n'), ((616, 635), 'numpy.empty', 'np.empty', ([], {'shape': '[0]'}), '(shape=[0])\n', (624, 635), True, 'import numpy as np\n'), ((637, 656), 'numpy.empty', 'np.empty', ([], {'shape': '[0]'}), '(shape=[0])\n', (645, 656), True, 'import numpy as np\n'), ((658, 677), 'numpy.empty', 'np.empty', ([], {'shape': '[0]'}), '(shape=[0])\n', (666, 677), True, 'import numpy as np\n'), ((948, 982), 'helpers.load_from_mendeley.load_from_file', 'mendeley.load_from_file', ([], {'path': 'path'}), '(path=path)\n', (971, 982), True, 'import helpers.load_from_mendeley as mendeley\n')] |
'''
Created on 24-May-2018
@author: <NAME>
'''
#Import all the packages we will going to use
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
# Fix the NumPy and TensorFlow RNG seeds for reproducible results.
# NOTE: the order of the np.random calls below determines the generated
# data, so the statement order in this section must not be changed.
np.random.seed(41)
tf.set_random_seed(41)
# Number of sample points.
n = 400
# Probability that a sample belongs to class 1.
p_c = 0.5
# Draw the class variable from a Bernoulli distribution: roughly 50% of
# the samples end up in each of class 0 and class 1.
c = np.random.binomial(n=1, p=p_c, size=n)
# The two information sources are two Gaussian distributions per class,
# so we need one mean value per class for each modality.
# Mean values for the visual modality.
mu_v_0 = 1.0
mu_v_1 = 8.0
# Mean values for the textual modality.
mu_t_0 = 13.0
mu_t_1 = 19.0
# Sample both modalities: unit-variance noise around the class mean.
x_v = np.random.randn(n) + np.where(c == 0, mu_v_0, mu_v_1)
x_t = np.random.randn(n) + np.where(c == 0, mu_t_0, mu_t_1)
# Center each modality on zero by subtracting its mean.
x_v = x_v - x_v.mean()
x_t = x_t - x_t.mean()
# Visualize the two classes in the combined (visual, textual) space.
plt.scatter(x_v, x_t, c=np.where(c == 0, 'blue', 'red'))
plt.xlabel('visual modality')
plt.ylabel('textual modality');
plt.show()
# Number of grid points per axis in the evaluation grid below.
resolution = 1000
# Linear sample set spanning the visual modality's observed range.
vs = np.linspace(x_v.min(), x_v.max(), resolution)
# Linear sample set spanning the textual modality's observed range.
ts = np.linspace(x_t.min(), x_t.max(), resolution)
# Expand the two axes into a full 2-D grid so every (visual, textual)
# pair of sample points gets evaluated.
vs, ts = np.meshgrid(vs, ts)
# Flatten the grids to 1-D, matching the placeholders' expected shape.
vs = np.ravel(vs)
ts = np.ravel(ts)
# Placeholder holding the visual modality input.
visual = tf.placeholder(tf.float32, shape=[None])
# Placeholder holding the textual modality input.
textual = tf.placeholder(tf.float32, shape=[None])
# Placeholder holding the class labels.
target = tf.placeholder(tf.int32, shape=[None])
# Binary classification problem.
NUM_CLASSES = 2
# Fixed number of neurons for every hidden layer.
HIDDEN_LAYER_DIM = 1
# Visual feature extractor: a single dense layer (tanh activation) that
# maps visual samples to hidden features.
h_v = tf.layers.dense(tf.reshape(visual, [-1, 1]),
                    HIDDEN_LAYER_DIM,
                    activation=tf.nn.tanh)
# Textual feature extractor: same shape as the visual one.
h_t = tf.layers.dense(tf.reshape(textual, [-1, 1]),
                    HIDDEN_LAYER_DIM,
                    activation=tf.nn.tanh)
# Aggregator network: fuse the features of both sources by stacking them
# and passing the stack through another dense layer.
fuse = tf.layers.dense(tf.stack([h_v, h_t], axis=1),
                     HIDDEN_LAYER_DIM,
                     activation=tf.nn.tanh)
# Flatten the fused features.
fuse = tf.layers.flatten(fuse)
# Second stage of the aggregator network.
z = tf.layers.dense(fuse,HIDDEN_LAYER_DIM,activation=tf.nn.sigmoid)
# Final dense layer producing one logit per class.
logits = tf.layers.dense(z, NUM_CLASSES)
# Sigmoid turns the logits into per-class probabilities.
prob = tf.nn.sigmoid(logits)
# Sigmoid cross-entropy loss against the one-hot encoded labels.
loss = tf.losses.sigmoid_cross_entropy(multi_class_labels=
                                       tf.one_hot(target, depth=2),
                                       logits=logits)
# Adam optimizer minimising the loss.
optimizer = tf.train.AdamOptimizer(learning_rate=0.1)
train_op = optimizer.minimize(loss)
def train(train_op, loss,sess):
    """Fit the fusion network for 100 epochs and plot the loss curve.

    train_op : optimizer step operation
    loss     : scalar loss tensor
    sess     : active TensorFlow session
    Relies on the module-level placeholders (visual, textual, target)
    and the generated arrays (x_v, x_t, c).
    """
    # All graph variables must be initialised before the first run.
    sess.run(tf.global_variables_initializer())
    # The training data never changes, so the feed dict is built once.
    feed = {visual: x_v, textual: x_t, target: c}
    epoch_losses = []
    for _ in range(100):
        _, epoch_loss = sess.run([train_op, loss], feed)
        epoch_losses.append(epoch_loss)
    # Visualise how the loss evolved over training.
    plt.plot(epoch_losses, label='loss')
    plt.title('loss')
# Create a TensorFlow session for graph execution.
sess = tf.Session()
# Fit the network on the generated training data (also plots the loss).
train(train_op, loss,sess)
# Evaluate the hidden activation and the class probabilities on every
# grid point of the (visual, textual) evaluation grid.
zs, probs = sess.run([z, prob], {visual: vs, textual: ts})
def plot_evaluations(evaluation, cmap, title, labels):
    """Overlay a network output on top of the raw data points.

    evaluation : flat array of per-grid-point network outputs
    cmap       : matplotlib colormap name
    title      : plot title
    labels     : tick labels for the two colour-bar extremes
    Uses the module-level x_v, x_t, c and resolution.
    """
    # Rescale the raw samples into the pixel coordinates of the grid image.
    scaled_v = (x_v - x_v.min()) * resolution / (x_v - x_v.min()).max()
    scaled_t = (x_t - x_t.min()) * resolution / (x_t - x_t.min()).max()
    colours = np.where(c == 0, 'blue', 'red')
    plt.scatter(scaled_v, scaled_t, c=colours)
    # Title and axis labels.
    plt.title(title, fontsize=14)
    plt.xlabel('visual modality')
    plt.ylabel('textual modality')
    # Draw the network response as a semi-transparent background image so
    # the decision boundary is visible behind the scatter points.
    grid = evaluation.reshape([resolution, resolution])
    plt.imshow(grid, origin='lower', cmap=cmap, alpha=0.5)
    # Colour bar with ticks pinned at the two extremes of the output.
    colour_bar = plt.colorbar(ticks=[evaluation.min(), evaluation.max()])
    colour_bar.ax.set_yticklabels(labels)
    colour_bar.ax.tick_params(labelsize=13)
# Visualise P(C=1) over the whole (visual, textual) grid, with the raw
# samples overlaid, to show the learned decision boundary.
plot_evaluations(probs[:, 1],
                cmap='bwr',
                title='$C$ prediction',
                labels=['$C=0$', '$C=1$'])
# Render the figure.
plt.show() | [
"tensorflow.layers.flatten",
"matplotlib.pyplot.ylabel",
"tensorflow.set_random_seed",
"numpy.random.binomial",
"numpy.where",
"matplotlib.pyplot.xlabel",
"tensorflow.placeholder",
"tensorflow.Session",
"matplotlib.pyplot.plot",
"tensorflow.nn.sigmoid",
"numpy.random.seed",
"numpy.meshgrid",
... | [((237, 255), 'numpy.random.seed', 'np.random.seed', (['(41)'], {}), '(41)\n', (251, 255), True, 'import numpy as np\n'), ((256, 278), 'tensorflow.set_random_seed', 'tf.set_random_seed', (['(41)'], {}), '(41)\n', (274, 278), True, 'import tensorflow as tf\n'), ((471, 509), 'numpy.random.binomial', 'np.random.binomial', ([], {'n': '(1)', 'p': 'p_c', 'size': 'n'}), '(n=1, p=p_c, size=n)\n', (489, 509), True, 'import numpy as np\n'), ((1135, 1164), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""visual modality"""'], {}), "('visual modality')\n", (1145, 1164), True, 'import matplotlib.pyplot as plt\n'), ((1165, 1195), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""textual modality"""'], {}), "('textual modality')\n", (1175, 1195), True, 'import matplotlib.pyplot as plt\n'), ((1197, 1207), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1205, 1207), True, 'import matplotlib.pyplot as plt\n'), ((1652, 1671), 'numpy.meshgrid', 'np.meshgrid', (['vs', 'ts'], {}), '(vs, ts)\n', (1663, 1671), True, 'import numpy as np\n'), ((1711, 1723), 'numpy.ravel', 'np.ravel', (['vs'], {}), '(vs)\n', (1719, 1723), True, 'import numpy as np\n'), ((1729, 1741), 'numpy.ravel', 'np.ravel', (['ts'], {}), '(ts)\n', (1737, 1741), True, 'import numpy as np\n'), ((1828, 1868), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), '(tf.float32, shape=[None])\n', (1842, 1868), True, 'import tensorflow as tf\n'), ((1917, 1957), 'tensorflow.placeholder', 'tf.placeholder', (['tf.float32'], {'shape': '[None]'}), '(tf.float32, shape=[None])\n', (1931, 1957), True, 'import tensorflow as tf\n'), ((2034, 2072), 'tensorflow.placeholder', 'tf.placeholder', (['tf.int32'], {'shape': '[None]'}), '(tf.int32, shape=[None])\n', (2048, 2072), True, 'import tensorflow as tf\n'), ((3132, 3155), 'tensorflow.layers.flatten', 'tf.layers.flatten', (['fuse'], {}), '(fuse)\n', (3149, 3155), True, 'import tensorflow as tf\n'), ((3224, 3289), 'tensorflow.layers.dense', 
'tf.layers.dense', (['fuse', 'HIDDEN_LAYER_DIM'], {'activation': 'tf.nn.sigmoid'}), '(fuse, HIDDEN_LAYER_DIM, activation=tf.nn.sigmoid)\n', (3239, 3289), True, 'import tensorflow as tf\n'), ((3388, 3419), 'tensorflow.layers.dense', 'tf.layers.dense', (['z', 'NUM_CLASSES'], {}), '(z, NUM_CLASSES)\n', (3403, 3419), True, 'import tensorflow as tf\n'), ((3487, 3508), 'tensorflow.nn.sigmoid', 'tf.nn.sigmoid', (['logits'], {}), '(logits)\n', (3500, 3508), True, 'import tensorflow as tf\n'), ((3787, 3828), 'tensorflow.train.AdamOptimizer', 'tf.train.AdamOptimizer', ([], {'learning_rate': '(0.1)'}), '(learning_rate=0.1)\n', (3809, 3828), True, 'import tensorflow as tf\n'), ((4577, 4589), 'tensorflow.Session', 'tf.Session', ([], {}), '()\n', (4587, 4589), True, 'import tensorflow as tf\n'), ((5978, 5988), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (5986, 5988), True, 'import matplotlib.pyplot as plt\n'), ((818, 836), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (833, 836), True, 'import numpy as np\n'), ((839, 871), 'numpy.where', 'np.where', (['(c == 0)', 'mu_v_0', 'mu_v_1'], {}), '(c == 0, mu_v_0, mu_v_1)\n', (847, 871), True, 'import numpy as np\n'), ((878, 896), 'numpy.random.randn', 'np.random.randn', (['n'], {}), '(n)\n', (893, 896), True, 'import numpy as np\n'), ((899, 931), 'numpy.where', 'np.where', (['(c == 0)', 'mu_t_0', 'mu_t_1'], {}), '(c == 0, mu_t_0, mu_t_1)\n', (907, 931), True, 'import numpy as np\n'), ((2391, 2418), 'tensorflow.reshape', 'tf.reshape', (['visual', '[-1, 1]'], {}), '(visual, [-1, 1])\n', (2401, 2418), True, 'import tensorflow as tf\n'), ((2690, 2718), 'tensorflow.reshape', 'tf.reshape', (['textual', '[-1, 1]'], {}), '(textual, [-1, 1])\n', (2700, 2718), True, 'import tensorflow as tf\n'), ((2989, 3017), 'tensorflow.stack', 'tf.stack', (['[h_v, h_t]'], {'axis': '(1)'}), '([h_v, h_t], axis=1)\n', (2997, 3017), True, 'import tensorflow as tf\n'), ((4482, 4512), 'matplotlib.pyplot.plot', 'plt.plot', (['losses'], 
{'label': '"""loss"""'}), "(losses, label='loss')\n", (4490, 4512), True, 'import matplotlib.pyplot as plt\n'), ((4517, 4534), 'matplotlib.pyplot.title', 'plt.title', (['"""loss"""'], {}), "('loss')\n", (4526, 4534), True, 'import matplotlib.pyplot as plt\n'), ((5257, 5286), 'matplotlib.pyplot.title', 'plt.title', (['title'], {'fontsize': '(14)'}), '(title, fontsize=14)\n', (5266, 5286), True, 'import matplotlib.pyplot as plt\n'), ((5291, 5320), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""visual modality"""'], {}), "('visual modality')\n", (5301, 5320), True, 'import matplotlib.pyplot as plt\n'), ((5325, 5355), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""textual modality"""'], {}), "('textual modality')\n", (5335, 5355), True, 'import matplotlib.pyplot as plt\n'), ((1102, 1133), 'numpy.where', 'np.where', (['(c == 0)', '"""blue"""', '"""red"""'], {}), "(c == 0, 'blue', 'red')\n", (1110, 1133), True, 'import numpy as np\n'), ((3664, 3691), 'tensorflow.one_hot', 'tf.one_hot', (['target'], {'depth': '(2)'}), '(target, depth=2)\n', (3674, 3691), True, 'import tensorflow as tf\n'), ((4052, 4085), 'tensorflow.global_variables_initializer', 'tf.global_variables_initializer', ([], {}), '()\n', (4083, 4085), True, 'import tensorflow as tf\n'), ((5158, 5189), 'numpy.where', 'np.where', (['(c == 0)', '"""blue"""', '"""red"""'], {}), "(c == 0, 'blue', 'red')\n", (5166, 5189), True, 'import numpy as np\n')] |
import os
import sys
sys.path.append('..')
from beepose.utils.util import NumpyEncoder, rotate_bound2,distance_point,distance_line_point, read_json,save_json, dets2boxes,boxes2dets,non_max_suppression_slow
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import json
import glob,os
import pylab
import cv2
from keras.models import load_model
import argparse
import math
# Frame rate of the source videos; used below to convert a frame index
# into a millisecond timestamp (frames * 1000 / FPS).
FPS = 20.0
def pollen_samples(mappings,annotations):
    """Collect the (tagx, tagy) coordinates of pollen-positive annotations.

    mappings    : dict keyed by frame id (only the keys are used).
    annotations : DataFrame with at least '#frame', 'pollen', 'tagx', 'tagy'.

    Returns a dict {frame_id: ndarray of [tagx, tagy] rows} covering every
    frame that has at least one annotation with pollen == 1.
    """
    positives = {}
    for frame_id in mappings:
        frame_rows = annotations[annotations['#frame'] == frame_id]
        coords = frame_rows[frame_rows['pollen'] == 1][['tagx', 'tagy']]
        if len(coords):
            positives[frame_id] = coords.values
    return positives
def npollen_samples(mappings,annotations):
    """Collect the (tagx, tagy) coordinates of pollen-negative annotations.

    Mirror of pollen_samples, selecting rows with pollen == 0 instead.
    Returns a dict {frame_id: ndarray of [tagx, tagy] rows} for every frame
    in `mappings` that has at least one pollen-negative annotation.
    """
    negatives = {}
    for frame_id in mappings:
        frame_rows = annotations[annotations['#frame'] == frame_id]
        coords = frame_rows[frame_rows['pollen'] == 0][['tagx', 'tagy']]
        if len(coords):
            negatives[frame_id] = coords.values
    return negatives
def find_matchings_pollen(mappings,pollen_samples):
    """For each frame with pollen, pick the mapping closest to the pollen point.

    mappings       : dict {frame_id: list of skeleton mappings}.
    pollen_samples : dict {frame_id: array of pollen coordinates}; only the
                     first coordinate of each frame is used.

    The distance score averages the point-to-segment distance and the
    distances to both mapping endpoints.  A frame only gets an entry when
    some mapping scores below the 15000 seed threshold.
    """
    matched = {}
    for frame_id, samples in pollen_samples.items():
        pollen_point = samples[0]
        best = 15000  # seed threshold: only closer mappings are accepted
        for candidate in mappings[frame_id]:
            score = (distance_line_point(candidate, pollen_point)
                     + distance_point(candidate[0], pollen_point)
                     + distance_point(candidate[1], pollen_point)) / 3
            if score < best:
                best = score
                matched[frame_id] = candidate
    return matched
def pollen_pipeling(mappings, annotations):
    """Match each pollen-positive annotation to its closest skeleton mapping.

    mappings    : dict {frame_id: list of skeleton mappings}.
    annotations : DataFrame of manual annotations (see pollen_samples).
    Returns the {frame_id: mapping} dict from find_matchings_pollen.

    Fix: the original assigned the result to a local named
    ``pollen_samples``, shadowing the module-level function of the same
    name, so the call on the right-hand side raised UnboundLocalError.
    """
    samples = pollen_samples(mappings, annotations)
    return find_matchings_pollen(mappings, samples)
def filter_trk(trk, threshold=2):
    """Return the track ids that appear in fewer than `threshold` frames.

    trk       : per-frame lists of track ids; entries <= 0 mean "no id"
                and are ignored.
    threshold : minimum number of appearances for a track to be kept.

    Returns the short-lived ids in ascending order (ids are taken to run
    from 1 to the largest id observed, so never-seen ids in that range
    count as 0 appearances, matching the original behaviour).

    Improvements: the occurrence map is no longer pre-seeded for every
    possible id, and an empty/id-free `trk` returns [] instead of raising.
    """
    # Count how many frame entries reference each positive id.
    counts = {}
    for frame_ids in trk:
        for raw_id in frame_ids:
            if raw_id > 0:
                tid = int(raw_id)
                counts[tid] = counts.get(tid, 0) + 1
    if not counts:
        # No positive ids at all (or empty input): nothing to filter.
        return []
    return [tid for tid in range(1, max(counts) + 1)
            if counts.get(tid, 0) < threshold]
def entering(event):
    """Return the integer track ids of every event labelled 'entering'.

    event : dict with a 'data' key mapping frame keys to lists of event
            dicts, each carrying 'labels' and a (string) 'id'.
    """
    data = event['data']
    return [int(float(item['id']))
            for frame_key in data
            for item in data[frame_key]
            if item['labels'] == 'entering']
def load_for_pollen(folder,detections_path,track_path,video_path,folder_video ='/mnt/storage/Gurabo/videos/Gurabo/mp4', model_json='../BeeLab/2l_model.json',model_weights='../BeeLab/2l_model.h5'):
    """Load everything needed to run pollen classification on one video.

    folder          : directory containing the detection/tracking json files.
    detections_path : detections file name, relative to `folder`.
    track_path      : raw tracking file name, relative to `folder`.
    video_path      : full path of the video to open.
    folder_video    : NOTE(review) — accepted but never used in this body.
    model_json / model_weights : paths forwarded to Keras load_model.

    Returns (detections_path, detections dict, raw track list, cv2
    VideoCapture, Keras model).
    """
    path=detections_path
    detections = read_json(os.path.join(folder,path))
    path2=track_path
    trk= read_json(os.path.join(folder,path2)) # raw tracking
    video =video_path
    vidcap = cv2.VideoCapture(video)
    # NOTE(review): Keras load_model's second positional argument is
    # custom_objects, not a weights path — passing model_weights here looks
    # wrong; confirm whether model_from_json + load_weights was intended.
    model=load_model(model_json,model_weights)
    return path,detections,trk,vidcap,model
def pollen_classifier_fragment(detections_file,trk_file,video_file,model_file,gpu,gpu_fraction,trk_pollen_name,start=0,limit=72000):
    """
    Run the pollen classifier over a fragment of one video.

    For every frame in [start, limit) this reads the pose detections and the
    raw track assignment, crops a rotated patch around each matched pair of
    body parts, classifies the patch with the Keras model, and appends
    [label, confidence, frame] to that track's list in ``trk_pollen``.
    Results are checkpointed as json to ``trk_pollen_name`` (inside the
    detections folder) every 1000 frames and once at the end.

    Inputs :
        - detections_file : path to detection file.
        - trk_file : path to tracking file
        - video_file : path to the video the detections refer to
        - model_file : path to the model to be used for prediction
        - gpu : Number id of the gpu to use (non-int leaves pinning alone).
        - gpu_fraction : fraction of the gpu memory to reserve
        - trk_pollen_name : basename of the json checkpoint file
        - start / limit : first frame (inclusive) / last frame (exclusive)
    """
    if type(gpu)==int:
        os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID" # see issue #152
        os.environ["CUDA_VISIBLE_DEVICES"]="%d"%gpu
    # Limit TensorFlow's GPU memory usage for this process.
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = gpu_fraction
    # NOTE(review): `session` is created but never referenced afterwards —
    # presumably meant to become Keras' backing session; confirm.
    session = tf.Session(config=config)
    # Checkpoints are written next to the detections file.
    folder = '/'.join(detections_file.split('/')[:-1])
    model = load_model(model_file,compile=False)
    print(detections_file)
    print(trk_file)
    print(video_file)
    detections = read_json(detections_file)
    trk = read_json(trk_file)
    print('Staring trk pollen classifier')
    # One (initially empty) result list per track id seen in the tracking.
    trk_pollen={}
    for trk_frame in trk:
        for t in trk_frame:
            trk_pollen[t]=[]
    vidcap = cv2.VideoCapture(video_file)
    # Seek to the starting frame, expressed in milliseconds via FPS.
    vidcap.set(cv2.CAP_PROP_POS_MSEC,start*1000.0/FPS)
    for i,k in enumerate(range(start,limit)):
        if k%500 ==0:
            print(k)
        if k%1000==0 and k>0:
            # Periodic checkpoint so partial results survive a crash.
            print('checkpoint at frame:',k)
            save_json(os.path.join(folder,trk_pollen_name),trk_pollen)
        try:
            mappings=detections[str(k)]['mapping']
            parts=detections[str(k)]['parts']['1']
            dets = detections[str(k)]['parts']['2']
            # Non-maximum suppression over part-2 detections (size-20 boxes).
            boxes = dets2boxes(dets,size=20)
            dets = boxes2dets(non_max_suppression_slow(boxes,0.6)[::-1])
        except:
            # NOTE(review): bare except swallows everything (including
            # KeyboardInterrupt); missing-frame keys are the expected case.
            print('Problem with ',k ,str(k))
            continue
        thrx = [t[:2] for t in dets]
        # NOTE(review): `success` is never checked — `image` may be None if
        # the read fails; confirm frames always exist in [start, limit).
        success,image = vidcap.read()
        RGB=image
        for p in parts:
            # Skip parts outside the region of interest.
            if p[0]<350 or p[0]> 2200 or p[1]>1200 :continue
            # Find the mapping tagged (3, 2) attached to this part —
            # presumably the thorax link; confirm part-id semantics.
            for m in mappings:
                if m[-1][0]==3 and m[-1][1]==2 and m[1]==p[:2]:
                    thorax = m[0]
            try:
                # NOTE(review): if no mapping matched above, `thorax` is
                # unbound (or stale from a previous part); the bare except
                # below silently masks the resulting NameError too.
                idx=thrx.index(thorax)
            except:
                print('thorax not found')
                continue
            # Track id assigned to this detection in frame k.
            t_id=trk[int(k)][idx]
            for m in mappings:
                if m[-1][0]==1 and m[-1][1]==2 and m[1]==p[:2]:
                    if m[0][1]<100:continue
                    # Crop a 250x300 patch aligned with the part pair,
                    # rotated so the pair axis is vertical.
                    rectangle=[250,300]
                    myradians = math.atan2(m[0][1]-m[1][1] ,m[0][0]-m[1][0])
                    angle=math.degrees(myradians)-90
                    im=rotate_bound2(RGB,((m[0][0]+m[1][0])/2),((m[0][1]+m[1][1])/2),angle,rectangle[0],rectangle[1])
                    im=cv2.cvtColor(im,cv2.COLOR_BGR2RGB)
                    score=model.predict(np.array([cv2.resize(im,(180,300))]))
                    # Record [label, confidence, frame] for this track.
                    if score[0][1]>0.5:
                        trk_pollen[t_id].append([1,score[0][1],k])
                    else:
                        trk_pollen[t_id].append([0,score[0][0],k])
    # Final checkpoint with the complete results.
    print('checkpoint at frame:',k)
    save_json(os.path.join(folder,trk_pollen_name),trk_pollen)
def launch_pollen_folder(folder, folder_video, pathportion, division):
    """Process a slice of the videos in a folder, so workers can share it.

    folder       : directory containing the merged detection json files.
    folder_video : directory containing the corresponding videos.
    pathportion  : 1-based index of the slice this worker should handle.
    division     : fraction of the file list making up one slice.

    Fix: the original read the undefined global ``portion`` instead of the
    ``pathportion`` parameter, raising NameError when called directly.
    """
    files = glob.glob(os.path.join(folder, 'merged*.json'))
    total = len(files)
    # Slice boundaries for this worker's share of the file list.
    init = int((pathportion - 1) * division * total)
    end = int(pathportion * division * total)
    print(total, init, end)
    print(files)
    # NOTE(review): the list of already-processed outputs is computed but
    # never used — presumably meant to skip finished videos; confirm intent.
    processed = glob.glob(os.path.join(folder, 'trk_pollen_raw_*.json'))
    processed_files = [f.split('/')[-1] for f in processed]
    for file in files[init:end]:
        print(file)
        # NOTE(review): load_for_pollen requires detections/track/video path
        # arguments, and pollen_classifier is not defined in this module —
        # this loop body looks stale relative to the current signatures.
        path, detections, trk, vidcap, model = load_for_pollen(folder, folder_video=folder_video)
        pollen_classifier(trk, folder, path, detections, vidcap, model)
if __name__ == '__main__':
    # Command-line interface for the pollen-processing pipeline.
    parser = argparse.ArgumentParser()
    parser.add_argument('--folder', type=str, default ='detections/one_week/', help='input detections file')
    parser.add_argument('--folder_video',type=str, default= '/mnt/storage/Gurabo/videos/Gurabo/mp4',help='Folder where videos are')
    # NOTE(review): argparse's type=bool does not parse booleans — any
    # non-empty string (e.g. "False") becomes True; confirm intended usage.
    parser.add_argument('--no_process_again',type=bool, default=True, help = 'If its processed not process again')
    parser.add_argument('--day',type=int, default = 21, help='day to process')
    parser.add_argument('--hour',type= int , default= 8, help='Hour')
    parser.add_argument('--endhour',type= int , default= 18, help='Hour')
    parser.add_argument('--division',type=float, default =0.25,help='divide the list to process')
    parser.add_argument('--portion',type=float, default =1,help='part of the list to process')
    args = parser.parse_args()
    # Unpack the parsed arguments into module-level names.
    folder = args.folder
    folder_video = args.folder_video
    proc = args.no_process_again
    day = args.day
    hour = args.hour
    end = args.endhour
    division = args.division
    # NOTE(review): arguments are parsed but no processing function is
    # invoked afterwards in this fragment; confirm the entry point.
    portion = args.portion
| [
"beepose.utils.util.read_json",
"beepose.utils.util.non_max_suppression_slow",
"keras.models.load_model",
"argparse.ArgumentParser",
"beepose.utils.util.rotate_bound2",
"tensorflow.Session",
"beepose.utils.util.distance_point",
"os.path.join",
"math.degrees",
"numpy.array",
"beepose.utils.util.d... | [((22, 43), 'sys.path.append', 'sys.path.append', (['""".."""'], {}), "('..')\n", (37, 43), False, 'import sys\n'), ((2742, 2765), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video'], {}), '(video)\n', (2758, 2765), False, 'import cv2\n'), ((2776, 2813), 'keras.models.load_model', 'load_model', (['model_json', 'model_weights'], {}), '(model_json, model_weights)\n', (2786, 2813), False, 'from keras.models import load_model\n'), ((3550, 3566), 'tensorflow.ConfigProto', 'tf.ConfigProto', ([], {}), '()\n', (3564, 3566), True, 'import tensorflow as tf\n'), ((3694, 3719), 'tensorflow.Session', 'tf.Session', ([], {'config': 'config'}), '(config=config)\n', (3704, 3719), True, 'import tensorflow as tf\n'), ((3787, 3824), 'keras.models.load_model', 'load_model', (['model_file'], {'compile': '(False)'}), '(model_file, compile=False)\n', (3797, 3824), False, 'from keras.models import load_model\n'), ((3924, 3950), 'beepose.utils.util.read_json', 'read_json', (['detections_file'], {}), '(detections_file)\n', (3933, 3950), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((3966, 3985), 'beepose.utils.util.read_json', 'read_json', (['trk_file'], {}), '(trk_file)\n', (3975, 3985), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((4187, 4215), 'cv2.VideoCapture', 'cv2.VideoCapture', (['video_file'], {}), '(video_file)\n', (4203, 4215), False, 'import cv2\n'), ((7764, 7789), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (7787, 7789), False, 'import argparse\n'), ((2595, 2621), 'os.path.join', 'os.path.join', (['folder', 'path'], {}), '(folder, path)\n', (2607, 2621), False, 'import glob, os\n'), ((2662, 2689), 'os.path.join', 'os.path.join', (['folder', 'path2'], {}), 
'(folder, path2)\n', (2674, 2689), False, 'import glob, os\n'), ((6897, 6934), 'os.path.join', 'os.path.join', (['folder', 'trk_pollen_name'], {}), '(folder, trk_pollen_name)\n', (6909, 6934), False, 'import glob, os\n'), ((7129, 7165), 'os.path.join', 'os.path.join', (['folder', '"""merged*.json"""'], {}), "(folder, 'merged*.json')\n", (7141, 7165), False, 'import glob, os\n'), ((7339, 7384), 'os.path.join', 'os.path.join', (['folder', '"""trk_pollen_raw_*.json"""'], {}), "(folder, 'trk_pollen_raw_*.json')\n", (7351, 7384), False, 'import glob, os\n'), ((4697, 4722), 'beepose.utils.util.dets2boxes', 'dets2boxes', (['dets'], {'size': '(20)'}), '(dets, size=20)\n', (4707, 4722), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((4461, 4498), 'os.path.join', 'os.path.join', (['folder', 'trk_pollen_name'], {}), '(folder, trk_pollen_name)\n', (4473, 4498), False, 'import glob, os\n'), ((1390, 1421), 'beepose.utils.util.distance_point', 'distance_point', (['mapping[1]', 'pol'], {}), '(mapping[1], pol)\n', (1404, 1421), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((1834, 1847), 'numpy.array', 'np.array', (['trk'], {}), '(trk)\n', (1842, 1847), True, 'import numpy as np\n'), ((4752, 4788), 'beepose.utils.util.non_max_suppression_slow', 'non_max_suppression_slow', (['boxes', '(0.6)'], {}), '(boxes, 0.6)\n', (4776, 4788), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((1323, 1356), 'beepose.utils.util.distance_line_point', 'distance_line_point', (['mapping', 'pol'], {}), '(mapping, pol)\n', (1342, 1356), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, 
distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((1359, 1390), 'beepose.utils.util.distance_point', 'distance_point', (['mapping[0]', 'pol'], {}), '(mapping[0], pol)\n', (1373, 1390), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((5704, 5752), 'math.atan2', 'math.atan2', (['(m[0][1] - m[1][1])', '(m[0][0] - m[1][0])'], {}), '(m[0][1] - m[1][1], m[0][0] - m[1][0])\n', (5714, 5752), False, 'import math\n'), ((5842, 5949), 'beepose.utils.util.rotate_bound2', 'rotate_bound2', (['RGB', '((m[0][0] + m[1][0]) / 2)', '((m[0][1] + m[1][1]) / 2)', 'angle', 'rectangle[0]', 'rectangle[1]'], {}), '(RGB, (m[0][0] + m[1][0]) / 2, (m[0][1] + m[1][1]) / 2, angle,\n rectangle[0], rectangle[1])\n', (5855, 5949), False, 'from beepose.utils.util import NumpyEncoder, rotate_bound2, distance_point, distance_line_point, read_json, save_json, dets2boxes, boxes2dets, non_max_suppression_slow\n'), ((5968, 6003), 'cv2.cvtColor', 'cv2.cvtColor', (['im', 'cv2.COLOR_BGR2RGB'], {}), '(im, cv2.COLOR_BGR2RGB)\n', (5980, 6003), False, 'import cv2\n'), ((5784, 5807), 'math.degrees', 'math.degrees', (['myradians'], {}), '(myradians)\n', (5796, 5807), False, 'import math\n'), ((6061, 6087), 'cv2.resize', 'cv2.resize', (['im', '(180, 300)'], {}), '(im, (180, 300))\n', (6071, 6087), False, 'import cv2\n')] |
#!/software/anaconda3.6/bin/python
from mpi4py import MPI
import os
import pickle
from OpSim import OpSim
from astropy.coordinates import SkyCoord
from astropy import units
import numpy as np
if __name__ == "__main__":
    # Per-rank MPI setup: rank 0 (root) loads the OpSim field list and
    # scatters an equal share of field IDs to every rank.
    comm = MPI.COMM_WORLD
    size = comm.Get_size()
    rank = comm.Get_rank()
    sendbuf = None
    root = 0
    #nfields = 2295# "primary" fields
    nfields = 3339  # all observed fields
    #nfields = 5292 #total number of fields from OpSim
    nfieldsPerCore = int(np.floor(nfields/size))
    print(f"nfields={nfields}, nfieldsPerCore={nfieldsPerCore}")
    # Scatter buffers: one row of field IDs per rank.
    sendbuf = np.empty((size, nfieldsPerCore), dtype='float64')
    recvbuf = np.empty(nfieldsPerCore, dtype='float64')
    if (rank == root):
        if not os.path.exists('output_files'):
            os.makedirs('output_files')
        OpS = OpSim()
        OpS.dbFile = '/projects/p30137/ageller/EBLSST/input/db/minion_1016_sqlite.db' #for the OpSim database
        OpS.getAllOpSimFields()
        #primary = np.where(OpS.Nobs > 800)
        #OpS.fieldID = OpS.fieldID[primary]
        # Keep only fields with at least one observation.
        observed = np.where(OpS.Nobs > 0)
        OpS.fieldID = OpS.fieldID[observed]
        nfields = len(OpS.fieldID)
        print(f"rank 0 nfields={nfields}")
        print(OpS.fieldID)
        # Scatter as many fields as divide evenly across the ranks; the
        # remainder is handled by rank 0 below.
        maxIndex = min(nfieldsPerCore*size, nfields-1)
        output = OpS.fieldID[:maxIndex].T
        print("reshaping to send to other processes")
        sendbuf = np.reshape(output, (size, nfieldsPerCore))
    # Scatter one slice of field IDs to every rank.
    comm.Scatter(sendbuf, recvbuf, root=root)
    fieldData = np.reshape(recvbuf, (nfieldsPerCore, 1))
    # Rank 0 also takes any leftover fields that did not divide evenly.
    if (rank == 0):
        if (nfieldsPerCore*size < nfields):
            print("adding to rank 0")
            extra = OpS.fieldID[maxIndex:].T
            fieldData = np.append(fieldData, extra)
    # Each rank reopens the OpSim database for its own slice of fields.
    fields = fieldData.T
    OpS = OpSim()
    OpS.dbFile = '/projects/p30137/ageller/EBLSST/input/db/minion_1016_sqlite.db' #for the OpSim database
    OpS.getCursors()
    OpS.fieldID = np.squeeze(fields)
    OpS.obsDates = np.full_like(OpS.fieldID, dict(), dtype=dict)
    OpS.NobsDates = np.full_like(OpS.fieldID, dict(), dtype=dict)
    OpS.m_5 = np.full_like(OpS.fieldID, dict(), dtype=dict)
    OpS.totalNobs = np.full_like(OpS.fieldID, 0)
    filters = ['u_', 'g_', 'r_', 'i_', 'z_', 'y_']
    Nbins = 100
    OpSimi = fieldData.astype("int")
    OpS.dt = np.array([{} for _ in OpSimi])
    for i, ID in enumerate(OpSimi):
        print(rank, i, ID)
        # Per-filter gaps between consecutive observation dates.
        dt = {}
        OpS.setDates(i, filters)
        for f in filters:
            dt[f] = np.diff(OpS.obsDates[i][f])
        OpS.dt[i] = dt
        # Histogram the log10 of the gaps into a pdf/cdf per filter.
        dist = {}
        for f in filters:
            # NOTE(review): np.log10 with `where=` but no `out=` leaves the
            # masked entries uninitialized, so the histogram may include
            # garbage values for non-positive gaps; confirm and fix upstream.
            pdf,bin_edges = np.histogram(np.log10(OpS.dt[i][f], where=(OpS.dt[i][f] > 0)), bins=Nbins)
            bins = bin_edges[:-1] + np.diff(bin_edges)/2.
            cdf = np.cumsum(pdf)
            dist[f] = {}
            dist[f]['bins']= bins
            dist[f]['cdf']= cdf
            dist[f]['pdf']= pdf
        oname = 'output_files/'+str(int(OpS.fieldID[i])).zfill(4) + "dist.pickle"
        # Fix: close the output file deterministically (the original passed
        # a bare open() to pickle.dump and leaked the handle).
        with open(oname, "wb") as out_file:
            pickle.dump(dist, out_file)
| [
"os.path.exists",
"numpy.log10",
"numpy.reshape",
"numpy.full_like",
"os.makedirs",
"numpy.where",
"numpy.floor",
"numpy.diff",
"numpy.squeeze",
"numpy.append",
"numpy.array",
"numpy.empty",
"numpy.cumsum",
"OpSim.OpSim"
] | [((568, 617), 'numpy.empty', 'np.empty', (['(size, nfieldsPerCore)'], {'dtype': '"""float64"""'}), "((size, nfieldsPerCore), dtype='float64')\n", (576, 617), True, 'import numpy as np\n'), ((629, 670), 'numpy.empty', 'np.empty', (['nfieldsPerCore'], {'dtype': '"""float64"""'}), "(nfieldsPerCore, dtype='float64')\n", (637, 670), True, 'import numpy as np\n'), ((1571, 1611), 'numpy.reshape', 'np.reshape', (['recvbuf', '(nfieldsPerCore, 1)'], {}), '(recvbuf, (nfieldsPerCore, 1))\n', (1581, 1611), True, 'import numpy as np\n'), ((1914, 1921), 'OpSim.OpSim', 'OpSim', ([], {}), '()\n', (1919, 1921), False, 'from OpSim import OpSim\n'), ((2059, 2077), 'numpy.squeeze', 'np.squeeze', (['fields'], {}), '(fields)\n', (2069, 2077), True, 'import numpy as np\n'), ((2277, 2305), 'numpy.full_like', 'np.full_like', (['OpS.fieldID', '(0)'], {}), '(OpS.fieldID, 0)\n', (2289, 2305), True, 'import numpy as np\n'), ((2412, 2442), 'numpy.array', 'np.array', (['[{} for i in OpSimi]'], {}), '([{} for i in OpSimi])\n', (2420, 2442), True, 'import numpy as np\n'), ((470, 494), 'numpy.floor', 'np.floor', (['(nfields / size)'], {}), '(nfields / size)\n', (478, 494), True, 'import numpy as np\n'), ((774, 781), 'OpSim.OpSim', 'OpSim', ([], {}), '()\n', (779, 781), False, 'from OpSim import OpSim\n'), ((1003, 1025), 'numpy.where', 'np.where', (['(OpS.Nobs > 0)'], {}), '(OpS.Nobs > 0)\n', (1011, 1025), True, 'import numpy as np\n'), ((1380, 1422), 'numpy.reshape', 'np.reshape', (['output', '(size, nfieldsPerCore)'], {}), '(output, (size, nfieldsPerCore))\n', (1390, 1422), True, 'import numpy as np\n'), ((702, 732), 'os.path.exists', 'os.path.exists', (['"""output_files"""'], {}), "('output_files')\n", (716, 732), False, 'import os\n'), ((737, 764), 'os.makedirs', 'os.makedirs', (['"""output_files"""'], {}), "('output_files')\n", (748, 764), False, 'import os\n'), ((1786, 1813), 'numpy.append', 'np.append', (['fieldData', 'extra'], {}), '(fieldData, extra)\n', (1795, 1813), True, 'import numpy 
as np\n'), ((2566, 2593), 'numpy.diff', 'np.diff', (['OpS.obsDates[i][f]'], {}), '(OpS.obsDates[i][f])\n', (2573, 2593), True, 'import numpy as np\n'), ((2797, 2811), 'numpy.cumsum', 'np.cumsum', (['pdf'], {}), '(pdf)\n', (2806, 2811), True, 'import numpy as np\n'), ((2676, 2722), 'numpy.log10', 'np.log10', (['OpS.dt[i][f]'], {'where': '(OpS.dt[i][f] > 0)'}), '(OpS.dt[i][f], where=OpS.dt[i][f] > 0)\n', (2684, 2722), True, 'import numpy as np\n'), ((2766, 2784), 'numpy.diff', 'np.diff', (['bin_edges'], {}), '(bin_edges)\n', (2773, 2784), True, 'import numpy as np\n')] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.